Compare commits

..

99 Commits

Author SHA1 Message Date
(no author)
758075df86 This commit was manufactured by cvs2svn to create tag 'CacheMergeWithTrunk'.
git-svn-id: svn://10.0.0.236/tags/CacheMergeWithTrunk@55505 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-07 01:40:46 +00:00
fur%netscape.com
76159a2caf Updated to NPL 1.1
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55504 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-07 01:40:45 +00:00
fur%netscape.com
1988cb05b5 Tweak comments
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55499 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-07 01:14:55 +00:00
fur%netscape.com
dd8c6b3bed Added nsINetDataCacheManager::SetDiskCacheFolder()
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55484 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-07 00:02:58 +00:00
fur%netscape.com
bc64749366 Account for arg changes in NewChannel() API
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55475 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:30:30 +00:00
fur%netscape.com
2e9287bd58 Match NewChannel() API changes on trunk
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55472 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:23:07 +00:00
fur%netscape.com
0f2274a140 Checkpoint
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55471 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:22:37 +00:00
fur%netscape.com
957fa10cac Retire nsINetDataCache::GetCapacity
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55469 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:16:10 +00:00
fur%netscape.com
e603146886 Track trunk API changes
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55467 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:15:32 +00:00
fur%netscape.com
cb698a0df1 + Retired nsINetDataCache::GetCapacity()
+ Fixed gcc build problem


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55466 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:14:38 +00:00
fur%netscape.com
1b918d9ef0 + Added proxy channel arg to NewChannel()
+ Changed name of setProtocolPrivate/getProtocolPrivate to setAnnotation/getAnnotation\
+ Added inUse attribute
+ Touched up comments


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55465 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:08:08 +00:00
fur%netscape.com
6407144c9f Removed capacity attribute
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55464 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:06:44 +00:00
fur%netscape.com
2ed563a178 Merge with trunk makefile.win
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@55462 18797224-902f-48f8-a5cc-f745e15eee43
1999-12-06 23:05:39 +00:00
fur%netscape.com
4becc0b508 Add assertion to cache manager to ensure that it is limiting cache occupancy
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54566 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-29 15:23:34 +00:00
fur%netscape.com
7d5427ea31 Obey the MAX_CONTENT_LENGTH limit
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54565 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-29 15:22:56 +00:00
fur%netscape.com
99904bcc48 Eliminate libs build target, as Warren has done for the rest of the tree
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54545 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-29 02:15:04 +00:00
fur%netscape.com
bcb56c9593 Add strong ref to channel
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54544 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-29 02:14:17 +00:00
fur%netscape.com
f18bc8e6cd Fix ownership issues. Change SetProtocolData/GetProtocolData args
to match new prototype.


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54109 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 07:51:56 +00:00
fur%netscape.com
7d14c5669a Handle NULL load group - they're supposed to be optional
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54108 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 07:51:14 +00:00
fur%netscape.com
c0dd3df02e Fix tons of ref-counting ownership issues and other bug fixes
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54107 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 07:50:25 +00:00
fur%netscape.com
e5cc84978f Checkpoint
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54097 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 05:08:20 +00:00
fur%netscape.com
5694537330 Changed SetProtocolData/GetProtocolData to accept a tag argument so that
multiple cache clients can attach info to the cache database.


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54096 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 05:07:40 +00:00
fur%netscape.com
297c5ceba3 Add/modify APIs to track nsIChannel
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54095 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 05:05:01 +00:00
fur%netscape.com
e0311312f7 Temporarily disable pref-reading code, since it doesn't work in the browser
and the code that measures the size of the cache db, since it's a performance
hog.


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54094 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 05:04:13 +00:00
fur%netscape.com
cf3dc77b02 Fix unitialized variable
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@54093 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-21 05:01:03 +00:00
fur%netscape.com
dd2506a737 Quash warnings
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53842 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-18 06:19:51 +00:00
fur%netscape.com
b1bee1f21c Merge with trunk
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53831 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-18 05:38:26 +00:00
fur%netscape.com
48ecc5625b Added review comments
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53674 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 19:46:28 +00:00
fur%netscape.com
1b791685e6 No longer need factory code. Its been moved to netwerk/cache/builds/nsNetDataCacheModule.cpp
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53663 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 18:21:11 +00:00
fur%netscape.com
dc94d1d6e2 Added review comments
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53647 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 10:08:26 +00:00
hoa.nguyen%intel.com
f5b437ade3 added Unix support
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53574 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 00:21:05 +00:00
hoa.nguyen%intel.com
8c0b4b3e4a changed NPL to MPL
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53573 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 00:16:33 +00:00
hoa.nguyen%intel.com
fb640ab144 added Truncate function
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53572 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 00:07:27 +00:00
hoa.nguyen%intel.com
106e263b33 added support for memory cache
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53570 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-16 00:00:54 +00:00
fur%netscape.com
85045a8552 Add TestCacheMgr
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53537 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 21:13:19 +00:00
fur%netscape.com
a2279be132 *** empty log message ***
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53536 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 21:12:58 +00:00
fur%netscape.com
0ce702d402 Don't call NS_ERROR() when a record ID is not found
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53533 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 21:12:17 +00:00
fur%netscape.com
f14d03cd67 Fixed CommitFlags()
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53531 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 21:11:14 +00:00
fur%netscape.com
b91343fdf6 Checkpoint
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53520 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 18:30:44 +00:00
fur%netscape.com
2c517489b5 Disable warning, so cache code can run
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53519 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 17:39:35 +00:00
(no author)
ae57da58eb This commit was manufactured by cvs2svn to create branch
'CacheIntegration_BRANCH'.

git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53517 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 17:10:07 +00:00
fur%netscape.com
8d4586dd65 Update components table and macro instantiations to conform to new definitions
in nsIGenericFactory.h


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53499 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 09:06:41 +00:00
fur%netscape.com
9b473ad9be Added starting offset param to interceptAsyncRead() method
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53498 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 08:53:15 +00:00
fur%netscape.com
2ad227a994 Merged with trunk
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53496 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 08:01:58 +00:00
fur%netscape.com
94d8da33c1 Replace 1.0 NPL with 1.1 version
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53487 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 06:13:13 +00:00
fur%netscape.com
81c05809fd Remove dead files
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53486 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 06:08:40 +00:00
fur%netscape.com
e30547b2b2 Remove dead files
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53485 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 05:52:29 +00:00
fur%netscape.com
257f9cfaaa Fix Boogs. Replace 1.0 NPL with 1.1 version
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53484 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-15 05:51:02 +00:00
fur%netscape.com
96e2654e43 Replace 1.0 NPL with 1.1 NPL
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53474 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-14 20:35:26 +00:00
fur%netscape.com
3b023433be Replace 1.0 NPL with 1.1 version
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53472 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-14 19:56:44 +00:00
fur%netscape.com
1b89716afe Added more comments
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53471 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-14 19:50:30 +00:00
fur%netscape.com
ad02058877 Add comments. Change method names
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53470 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-14 18:51:57 +00:00
fur%netscape.com
fa8a3196e7 Merge with trunk
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53434 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-13 19:35:49 +00:00
fur%netscape.com
5c2c543e58 Fixed bugs which prevented embedded NUL characters
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53431 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-13 18:45:28 +00:00
fur%netscape.com
c588721cc0 Added NS_NewStorageStream().
Changed method name, Initialize ==> Init


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53430 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-13 18:43:20 +00:00
fur%netscape.com
0b049b17ba Fix Boogs
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53429 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-13 18:41:01 +00:00
fur%netscape.com
854ef4631d Merge from trunk
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53268 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-11 22:29:20 +00:00
fur%netscape.com
051c558653 Detect failure to truncate cache entry
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53214 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-11 18:16:24 +00:00
fur%netscape.com
37a04adb09 Killed build warnings. Added stubs for unimplemented methods
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53072 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-10 06:05:33 +00:00
fur%netscape.com
c823c04b45 Combine cache components into module
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@53014 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-09 22:08:49 +00:00
fur%netscape.com
03cbd000eb Sync with trunk
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52998 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-09 17:36:48 +00:00
fur%netscape.com
a91e91a1c7 Added Windows makefiles so that the cache manager, file cache and
memory cache components are built as part of netlib and combined into
a single XPCOM module, named "nkcache.dll"


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52970 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:44:04 +00:00
fur%netscape.com
be7a5a48b6 Added call to LimitCacheSize
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52968 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:40:41 +00:00
fur%netscape.com
b2bc7468e8 Add cache manager CID
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52967 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:38:20 +00:00
fur%netscape.com
9692dfd994 Add cache manager ProgID
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52966 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:37:30 +00:00
fur%netscape.com
f3edd4cfb5 Added an owning reference from nsDiskCacheRecordChannel to
its associated nsDiskCacheRecord.  Without this, the channel
may access free'ed memory.


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52962 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:28:39 +00:00
fur%netscape.com
33403345c1 Rename class to avoid name collision with similar code in file cache.
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52961 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:25:28 +00:00
fur%netscape.com
20c850de23 Merge with tip
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52960 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:23:35 +00:00
fur%netscape.com
e8b619cd02 Merge with tip
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52959 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 22:18:25 +00:00
fur%netscape.com
eed396bb92 Stabilize ref-count during construction
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52946 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-08 18:46:17 +00:00
fur%netscape.com
41d44c070b Eliminate dead files
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52932 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-07 20:19:55 +00:00
fur%netscape.com
698ba42268 Revamped directory structure
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52922 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-06 19:36:16 +00:00
(no author)
1a0fd23991 This commit was manufactured by cvs2svn to create branch
'CacheIntegration_BRANCH'.

git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52912 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-06 03:43:56 +00:00
fur%netscape.com
67dded330b Add nsDiskCacheRecordChannel.cpp
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52876 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 22:18:26 +00:00
fur%netscape.com
936ff4777a Fix compilation errors on Win32
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52874 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 22:18:04 +00:00
fur%netscape.com
96c55e42f7 Accommodate API changes in nsINetDataCache
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52873 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 22:17:42 +00:00
fur%netscape.com
82fa0cf06a Got rid of GetReadOnly(). Added GetFlags()
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52872 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 22:15:05 +00:00
fur%netscape.com
98c8285334 First shot at Win32 makefile
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52869 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 22:03:02 +00:00
fur%netscape.com
bad4b683f4 Removed SetCapacity() method
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52868 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 22:01:26 +00:00
fur%netscape.com
cb5269a28a Checkpoint
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52864 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 21:47:25 +00:00
fur%netscape.com
74712f3635 Added binary I/O streams
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52860 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 20:02:06 +00:00
fur%netscape.com
9f8ea739db Correct error comment
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52859 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-05 20:01:39 +00:00
hoa.nguyen%intel.com
97a10dd7c6 Add offset writing for nsOutputStream
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52789 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-04 18:02:39 +00:00
hoa.nguyen%intel.com
85a132fac0 Add command line switch to test memory and disk cache.
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52788 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-04 18:01:28 +00:00
hoa.nguyen%intel.com
e594eee877 Add proxy channel interface, and misc bug fixes.
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@52787 18797224-902f-48f8-a5cc-f745e15eee43
1999-11-04 17:59:42 +00:00
hoa.nguyen%intel.com
9148eee3d6 Initial checkin of disk cache modules
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@51580 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-22 22:37:18 +00:00
hoa.nguyen%intel.com
b5989a8382 Initial checkin of disk cache module
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@51579 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-22 22:36:07 +00:00
fur%netscape.com
2b861f60d9 Create a new channel for every call to Write()
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@51335 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-21 01:35:51 +00:00
fur%netscape.com
87db050b37 Added tests for:
nsINetDataCache::GetStorageInUse()
    nsINetDataCacheRecord::SetContentLength()
    nsIOutputStream::Write(), using non-zero starting offsets


git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@51071 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-18 23:55:44 +00:00
fur%netscape.com
ffe483cf95 Initial cut at memory-cache functionality is complete
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@51067 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-18 23:46:08 +00:00
fur%netscape.com
52aa17a1c3 Incorporate nsStorageStream into xpcom.dll
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50814 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:40:42 +00:00
fur%netscape.com
5fdb3aa69e Initial implementation of 'storage stream' - used as the heart of the memory cache
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50812 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:39:45 +00:00
fur%netscape.com
8cae473bc0 Add opaque keys to nsHashtable
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50811 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:38:01 +00:00
fur%netscape.com
0719303755 Fix linkage problem
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50810 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:37:21 +00:00
fur%netscape.com
90d3e40858 Fix bugs in Next(). Prev() and IsDone()
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50809 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:36:35 +00:00
fur%netscape.com
c792b2d35c Changed IDL to generate identical C++ headers, but with better scriptability
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50808 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:32:04 +00:00
fur%netscape.com
7a4377d840 Initial cut at memory cache
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50806 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:24:06 +00:00
fur%netscape.com
a5fa416010 Added TestRawCache.cpp
git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50804 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-15 07:19:52 +00:00
(no author)
49d00db5e2 This commit was manufactured by cvs2svn to create branch
'CacheIntegration_BRANCH'.

git-svn-id: svn://10.0.0.236/branches/CacheIntegration_BRANCH@50589 18797224-902f-48f8-a5cc-f745e15eee43
1999-10-13 10:24:13 +00:00
149 changed files with 9037 additions and 25098 deletions

38
mozilla/netwerk/cache/Makefile.in vendored Normal file
View File

@@ -0,0 +1,38 @@
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
#
DEPTH = ../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = @srcdir@
include $(DEPTH)/config/autoconf.mk
DIRS = \
public \
memcache \
filecache \
mgr \
build \
$(NULL)
include $(topsrcdir)/config/rules.mk

33
mozilla/netwerk/cache/Makefile.win vendored Executable file
View File

@@ -0,0 +1,33 @@
#!gmake
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
DEPTH=..\..
DIRS= \
public \
mgr \
memcache \
filecache \
build \
$(NULL)
include <$(DEPTH)\config\rules.mak>

54
mozilla/netwerk/cache/build/Makefile.in vendored Normal file
View File

@@ -0,0 +1,54 @@
#
# The contents of this file are subject to the Netscape Public License
# Version 1.0 (the "NPL"); you may not use this file except in
# compliance with the NPL. You may obtain a copy of the NPL at
# http://www.mozilla.org/NPL/
#
# Software distributed under the NPL is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
# for the specific language governing rights and limitations under the
# NPL.
#
# The Initial Developer of this code under the NPL is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All Rights
# Reserved.
#
DEPTH = ../../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = @srcdir@
include $(DEPTH)/config/autoconf.mk
MODULE = nkcache
LIBRARY_NAME = nkcache
IS_COMPONENT = 1
CPPSRCS = nsNetDataCacheModule.cpp
SHARED_LIBRARY_LIBS = \
$(DIST)/lib/libnkcachemgr_s.a \
$(DIST)/lib/libnkfilecache_s.a \
$(DIST)/lib/libnkmemcache_s.a \
$(DIST)/lib/libmozdbm_s.a \
$(DIST)/lib/libxpcomio_s.a \
$(NULL)
LOCAL_INCLUDES = \
-I$(DEPTH)/netwerk/cache/memcache \
-I$(DEPTH)/netwerk/cache/filecache \
-I$(DEPTH)/netwerk/cache/mgr \
$(NULL)
EXTRA_DSO_LDOPTS = \
$(MKSHLIB_FORCE_ALL) \
$(SHARED_LIBRARY_LIBS) \
$(MKSHLIB_UNFORCE_ALL) \
$(NULL)
include $(topsrcdir)/config/rules.mk
$(LIBRARY) $(SHARED_LIBRARY): $(SHARED_LIBRARY_LIBS) Makefile

View File

@@ -0,0 +1,51 @@
#!gmake
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
DEPTH=..\..\..
MODULE=nkcache
MAKE_OBJ_TYPE=DLL
DLLNAME=nkcache
DLL=.\$(OBJDIR)\$(DLLNAME).dll
CPP_OBJS= \
.\$(OBJDIR)\nsNetDataCacheModule.obj \
$(NULL)
LLIBS= \
$(DIST)\lib\nkcachemgr_s.lib \
$(DIST)\lib\nkfilecache_s.lib \
$(DIST)\lib\nkmemcache_s.lib \
$(DIST)\lib\dbm32.lib \
$(DIST)\lib\xpcom.lib \
$(LIBNSPR)
INCS = $(INCS) \
-I$(DEPTH)\netwerk\cache\memcache \
-I$(DEPTH)\netwerk\cache\filecache \
-I$(DEPTH)\netwerk\cache\mgr \
$(NULL)
include <$(DEPTH)\config\rules.mak>
install:: $(DLL)
$(MAKE_INSTALL) .\$(OBJDIR)\$(DLLNAME).dll $(DIST)\bin\components
$(MAKE_INSTALL) .\$(OBJDIR)\$(DLLNAME).lib $(DIST)\lib

View File

@@ -0,0 +1,49 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
#include "nsCOMPtr.h"
#include "nsIModule.h"
#include "nscore.h"
#include "nsIComponentManager.h"
#include "nsIServiceManager.h"
#include "nsIGenericFactory.h"
#include "nsINetDataCache.h"
#include "nsINetDataCacheManager.h"
#include "nsMemCacheCID.h"
#include "nsMemCache.h"
#include "nsNetDiskCache.h"
#include "nsNetDiskCacheCID.h"
#include "nsCacheManager.h"
// Factory method to create a new nsMemCache instance. Used
// by nsNetDataCacheModule
NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsMemCache, Init)
NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsNetDiskCache, Init)
NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsCacheManager, Init)
static nsModuleComponentInfo components[] = {
{ "Memory Cache", NS_MEM_CACHE_FACTORY_CID, NS_NETWORK_MEMORY_CACHE_PROGID, nsMemCacheConstructor },
{ "File Cache", NS_NETDISKCACHE_CID, NS_NETWORK_FILE_CACHE_PROGID, nsNetDiskCacheConstructor },
{ "Cache Manager",NS_CACHE_MANAGER_CID, NS_NETWORK_CACHE_MANAGER_PROGID,nsCacheManagerConstructor }
};
NS_IMPL_NSGETMODULE("Network Data Cache", components)

View File

@@ -0,0 +1,56 @@
#
# The contents of this file are subject to the Mozilla Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is Mozilla Communicator.
#
# The Initial Developer of the Original Code is Intel Corp.
# Portions created by Intel Corp. are
# Copyright (C) 1999, 1999 Intel Corp. All
# Rights Reserved.
#
# Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
# Carl Wong <carl.wong@intel.com>
#
DEPTH = ../../..
topsrcdir = @top_srcdir@
VPATH = @srcdir@
srcdir = @srcdir@
include $(DEPTH)/config/autoconf.mk
MODULE = nkcache
LIBRARY_NAME = nkfilecache_s
REQUIRES = nspr dbm
EXTRA_DSO_LDOPTS += -L$(DIST)/lib -lmozdbm_s
EXPORTS=nsNetDiskCacheCID.h
CPPSRCS = \
nsDBAccessor.cpp\
nsDBEnumerator.cpp \
nsNetDiskCache.cpp \
nsDiskCacheRecord.cpp \
nsDiskCacheRecordChannel.cpp \
$(NULL)
EXTRA_LIBS = $(NSPR_LIBS)
# we don't want the shared lib, but we want to force the creation of a
# static lib.
override NO_SHARED_LIB=1
override NO_STATIC_LIB=
include $(topsrcdir)/config/rules.mk

View File

@@ -0,0 +1,44 @@
#!gmake
#
# The contents of this file are subject to the Netscape Public License
# Version 1.0 (the "NPL"); you may not use this file except in
# compliance with the NPL. You may obtain a copy of the NPL at
# http://www.mozilla.org/NPL/
#
# Software distributed under the NPL is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
# for the specific language governing rights and limitations under the
# NPL.
#
# The Initial Developer of this code under the NPL is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All Rights
# Reserved.
DEPTH=..\..\..
include <$(DEPTH)/config/config.mak>
MODULE = nkcache
LIBRARY_NAME = nkfilecache_s
CPP_OBJS= \
.\$(OBJDIR)\nsDBAccessor.obj \
.\$(OBJDIR)\nsDBEnumerator.obj \
.\$(OBJDIR)\nsNetDiskCache.obj \
.\$(OBJDIR)\nsDiskCacheRecord.obj \
.\$(OBJDIR)\nsDiskCacheRecordChannel.obj \
$(NULL)
EXPORTS=nsNetDiskCacheCID.h
include <$(DEPTH)\config\rules.mak>
install:: $(LIBRARY)
$(MAKE_INSTALL) $(LIBRARY) $(DIST)\lib
clobber::
rm -rf $(OBJDIR)
rm -f $(DIST)\lib\$(LIBRARY_NAME).lib

View File

@@ -0,0 +1,351 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
// FUR - Add overall description comment here
#include "nsDBAccessor.h"
#include "nscore.h"
#include "prtypes.h"
#include "plhash.h"
#include "nsCRT.h"
#include "nsAutoLock.h"
nsDBAccessor::nsDBAccessor() :
mDB(0) ,
mSessionID(0) ,
mSessionCntr(0)
{
NS_INIT_REFCNT();
}
nsDBAccessor::~nsDBAccessor()
{
printf(" ~nsDBAccessor\n") ;
Shutdown() ;
}
//
// Implement nsISupports methods
//
NS_IMPL_ISUPPORTS(nsDBAccessor, NS_GET_IID(nsIDBAccessor))
///////////////////////////////////////////////////////////
// nsIDBAccessor methods
// Open (or create) the hash db backing the disk cache and establish this
// process's session id.  The session id is persisted in the db itself
// under SessionKey: an existing value is incremented, a fresh db starts
// at ini_sessionID.  Returns NS_ERROR_FAILURE on any db error.
NS_IMETHODIMP
nsDBAccessor::Init(nsIFileSpec* dbfile)
{
  // FUR - lock not needed
  m_Lock = PR_NewLock() ;
  if(!m_Lock)
    return NS_ERROR_OUT_OF_MEMORY ;
  char* dbname ;
  // this should cover all platforms.
  dbfile->GetNativePath(&dbname) ;
  // FUR - how is page size chosen ? It's worth putting a comment
  // in here about the possible usefulness of tuning these parameters
  HASHINFO hash_info = {
    16*1024 , /* bucket size */
    0 , /* fill factor */
    0 , /* number of elements */
    0 , /* bytes to cache */
    0 , /* hash function */
    0} ; /* byte order */
  // FUR - lock not needed
  nsAutoLock lock(m_Lock) ;
  mDB = dbopen(dbname,
               O_RDWR | O_CREAT ,
               0600 ,
               DB_HASH ,
               & hash_info) ;
  // FUR - does dbname have to be free'ed ?
  // NOTE(review): GetNativePath presumably allocates dbname -- TODO
  // confirm ownership; if so it leaks here.
  if(!mDB)
    return NS_ERROR_FAILURE ;
  // Look up the previous session id stored under SessionKey.
  // FUR - Why the +1 ? (No need for key to be NUL-terminated string.)
  PRUint32 len = PL_strlen(SessionKey)+1 ;
  DBT db_key, db_data ;
  db_key.data = NS_CONST_CAST(char*, SessionKey) ;
  db_key.size = len ;
  int status = (*mDB->get)(mDB, &db_key, &db_data, 0) ;
  if(status == -1) {
    NS_ERROR("ERROR: failed get session id in database.") ;
    return NS_ERROR_FAILURE ;
  }
  if(status == 0) {
    // Found the last session id; sanity-check it and continue from there.
    PRInt16 *old_ID = NS_STATIC_CAST(PRInt16*, db_data.data) ;
    if(*old_ID < ini_sessionID) {
      NS_ERROR("ERROR: Bad Session ID in database, corrupted db.") ;
      return NS_ERROR_FAILURE ;
    }
    // FUR - need to comment out all printfs, or turn them into PR_LOG statements
    printf("found previous session, id = %d\n", *old_ID) ;
    mSessionID = *old_ID + 1 ;
  }
  else if(status == 1) {
    // status == 1 means "key not found": must be a new db
    mSessionID = ini_sessionID ;
  }
  db_data.data = NS_REINTERPRET_CAST(void*, &mSessionID) ;
  db_data.size = sizeof(PRInt16) ;
  // Persist the new session id so the next run continues the sequence.
  status = (*mDB->put)(mDB, &db_key, &db_data, 0) ;
  if(status == 0) {
    (*mDB->sync)(mDB, 0) ;
    return NS_OK ;
  }
  else {
    NS_ERROR("reset session ID failure.") ;
    return NS_ERROR_FAILURE ;
  }
}
// Flush and close the db and release the lock.  Safe to call more than
// once: both the db handle and the lock are nulled after teardown.
NS_IMETHODIMP
nsDBAccessor::Shutdown(void)
{
  if(mDB) {
    (*mDB->sync)(mDB, 0) ;
    (*mDB->close)(mDB) ;
    mDB = nsnull ;
  }
  // FUR - locks not necessary
  if(m_Lock) {
    PR_DestroyLock(m_Lock);
    // Fix: m_Lock was not cleared, so an explicit Shutdown() followed by
    // the destructor's Shutdown() destroyed the lock twice.
    m_Lock = nsnull ;
  }
  return NS_OK ;
}
// Look up the metadata stored under recordID aID.  On a hit *anEntry and
// *aLength describe the db-owned buffer; on a clean miss both stay
// null/0 and NS_OK is still returned.  NS_ERROR_FAILURE means a db error.
NS_IMETHODIMP
nsDBAccessor::Get(PRInt32 aID, void** anEntry, PRUint32 *aLength)
{
  if(!anEntry)
    return NS_ERROR_NULL_POINTER ;
  *anEntry = nsnull ;
  *aLength = 0 ;
  NS_ASSERTION(mDB, "no database") ;
  // Lock the db for the duration of the lookup.
  nsAutoLock lock(m_Lock) ;
  DBT lookupKey, result ;
  lookupKey.data = NS_REINTERPRET_CAST(void*, &aID) ;
  lookupKey.size = sizeof(PRInt32) ;
  int rc = (*mDB->get)(mDB, &lookupKey, &result, 0) ;
  if(rc == -1)
    return NS_ERROR_FAILURE ;
  if(rc == 0) {
    // hit: hand back the buffer the db returned
    *anEntry = result.data ;
    *aLength = result.size ;
  }
  // rc == 1 (not found) falls through with a null entry.
  return NS_OK ;
}
// Store (or overwrite) the metadata for recordID aID and flush the db.
NS_IMETHODIMP
nsDBAccessor::Put(PRInt32 aID, void* anEntry, PRUint32 aLength)
{
  NS_ASSERTION(mDB, "no database") ;
  // Lock the db for the duration of the write.
  nsAutoLock lock(m_Lock) ;
  DBT storeKey, storeData ;
  storeKey.data = NS_REINTERPRET_CAST(void*, &aID) ;
  storeKey.size = sizeof(PRInt32) ;
  storeData.data = anEntry ;
  storeData.size = aLength ;
  int rc = (*mDB->put)(mDB, &storeKey, &storeData, 0) ;
  if(rc != 0) {
    // FUR - Try to avoid using NS_ERROR unless error is unrecoverable and serious
    NS_ERROR("ERROR: Failed to put anEntry into db.\n") ;
    return NS_ERROR_FAILURE ;
  }
  // FUR - I would avoid unnecessary sync'ing for performance's
  // sake. Maybe you could limit sync to max rate of, say, once
  // every few seconds by keeping track of last sync time, using PR_Now().
  (*mDB->sync)(mDB, 0) ;
  return NS_OK ;
}
/*
 * Remove both db entries belonging to a record.  The id->metadata entry
 * is removed first since the key->id mapping is just a reference to it.
 */
NS_IMETHODIMP
nsDBAccessor::Del(PRInt32 aID, void* anEntry, PRUint32 aLength)
{
  NS_ASSERTION(mDB, "no database") ;
  // FUR - no locks necessary
  // Lock the db
  nsAutoLock lock(m_Lock) ;
  DBT db_key ;
  // delete recordID->metadata
  db_key.data = NS_REINTERPRET_CAST(void*, &aID) ;
  db_key.size = sizeof(PRInt32) ;
  PRInt32 status = (*mDB->del)(mDB, &db_key, 0) ;
  if(-1 == status) {
    // Fix: replaced raw printf debugging with NS_WARNING, as directed by
    // the in-file review notes.
    NS_WARNING("nsDBAccessor::Del: failed to delete recordID->metadata entry") ;
    return NS_ERROR_FAILURE ;
  }
  // delete key->recordID
  db_key.data = anEntry ;
  db_key.size = aLength ;
  status = (*mDB->del)(mDB, &db_key, 0) ;
  if(-1 == status) {
    NS_WARNING("nsDBAccessor::Del: failed to delete key->recordID entry") ;
    return NS_ERROR_FAILURE ;
  }
  // FUR - Defer sync ? See above
  (*mDB->sync)(mDB, 0) ;
  return NS_OK ;
}
// Map a cache key to its 32-bit recordID, minting a new id if the key is
// unknown.  New ids pack the session id in the high 16 bits and a
// per-session counter in the low 16.
// NOTE(review): mSessionCntr is PRInt16, so more than 0xffff new records
// in one session would wrap into the session-id bits -- TODO confirm
// this is acceptable for the expected cache sizes.
NS_IMETHODIMP
nsDBAccessor::GetID(const char* key, PRUint32 length, PRInt32* aID)
{
  NS_ASSERTION(mDB, "no database") ;
  // Lock the db
  nsAutoLock lock(m_Lock) ;
  DBT db_key, db_data ;
  db_key.data = NS_CONST_CAST(char*, key) ;
  db_key.size = length ;
  int status = (*mDB->get)(mDB, &db_key, &db_data, 0) ;
  if(status == 0) {
    // found recordID
    *aID = *(NS_REINTERPRET_CAST(PRInt32*, db_data.data)) ;
    return NS_OK ;
  }
  else if(status == 1) {
    // key not in db: mint a new id and record the key->id mapping
    PRInt32 id = 0 ;
    id = mSessionID << 16 | mSessionCntr++ ;
    // add new id into mDB
    db_data.data = NS_REINTERPRET_CAST(void*, &id) ;
    db_data.size = sizeof(PRInt32) ;
    status = (*mDB->put)(mDB, &db_key, &db_data, 0) ;
    if(status != 0) {
      NS_ERROR("updating db failure.") ;
      return NS_ERROR_FAILURE ;
    }
    // FUR - defer sync ?
    (*mDB->sync)(mDB, 0) ;
    *aID = id ;
    return NS_OK ;
  }
  else {
    NS_ERROR("ERROR: keydb failure.") ;
    return NS_ERROR_FAILURE ;
  }
}
// Walk the db sequentially, returning only id->metadata records and
// skipping the other two entry kinds that share the table (key->recordID
// mappings and the session-id bookkeeping entry).  bReset == PR_TRUE
// restarts the walk.  At end-of-database, *anEntry stays null and NS_OK
// is returned; callers distinguish "done" by the null entry.
NS_IMETHODIMP
nsDBAccessor::EnumEntry(void** anEntry, PRUint32* aLength, PRBool bReset)
{
  if(!anEntry)
    return NS_ERROR_NULL_POINTER ;
  *anEntry = nsnull ;
  *aLength = 0 ;
  NS_ASSERTION(mDB, "no database") ;
  PRUint32 flag ;
  if(bReset)
    flag = R_FIRST ;
  else
    flag = R_NEXT ;
  // Lock the db
  nsAutoLock lock(m_Lock) ;
  DBT db_key, db_data ;
  // length of the session key as written by Init()
  // FUR - +1 unnecessary ?
  PRUint32 len = PL_strlen(SessionKey)+1 ;
  int status ;
  do {
    status = (*mDB->seq)(mDB, &db_key, &db_data, flag) ;
    flag = R_NEXT ;
    if(status == -1)
      return NS_ERROR_FAILURE ;
    // get next if it's a key->recordID entry (long key, 32-bit value)
    if(db_key.size > sizeof(PRInt32) && db_data.size == sizeof(PRInt32))
      continue ;
    // get next if it's the sessionID entry (session key, 16-bit value)
    if(db_key.size == len && db_data.size == sizeof(PRInt16))
      continue ;
    // recordID keys are always 32 bits long: found a metadata record
    if(db_key.size == sizeof(PRInt32))
      break ;
  } while(!status) ;
  if (0 == status) {
    *anEntry = db_data.data ;
    *aLength = db_data.size ;
  }
  return NS_OK ;
}

View File

@@ -0,0 +1,68 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
// FUR - Add overall description comment here
// Fix: guard renamed from _NSIDBACCESSOR_H_ -- that spelling is the
// natural guard of the nsIDBAccessor.h interface header included below,
// so sharing it risks one header silently suppressing the other.
#ifndef _NSDBACCESSOR_H_
#define _NSDBACCESSOR_H_
#include "nsIDBAccessor.h"
#include "mcom_db.h"
// bogus string for the key of session id
// FUR - suggest "SK" instead of "^^"
static const char * const SessionKey = "^^" ;
// initial session id number
static const PRInt16 ini_sessionID = 0xff ;
// Accessor for the Berkeley-DB (mcom_db) hash file backing the disk
// cache.  One table holds three entry kinds: recordID->metadata,
// key->recordID, and a single session-id bookkeeping entry.
class nsDBAccessor : public nsIDBAccessor
{
  public:
  NS_DECL_ISUPPORTS
  nsDBAccessor() ;
  virtual ~nsDBAccessor() ;
  // Open the db file and establish this session's id.
  NS_IMETHOD Init(nsIFileSpec* dbfile) ;
  // Flush and close the db; safe to call from the destructor.
  NS_IMETHOD Shutdown(void) ;
  // Store/fetch/remove metadata keyed by recordID.
  NS_IMETHOD Put(PRInt32 aID, void* anEntry, PRUint32 aLength) ;
  NS_IMETHOD Get(PRInt32 aID, void** anEntry, PRUint32 *aLength) ;
  NS_IMETHOD Del(PRInt32 aID, void* anEntry, PRUint32 aLength) ;
  // Map a cache key to its recordID, minting a new id when unknown.
  NS_IMETHOD GetID(const char* key, PRUint32 length, PRInt32* aID) ;
  // Sequentially enumerate the id->metadata records.
  NS_IMETHOD EnumEntry(void* *anEntry, PRUint32* aLength, PRBool bReset) ;
  protected:
  private:
  DB * mDB ;                  // open db handle, null until Init()
  PRInt16 mSessionID ;        // high 16 bits of ids minted this session
  PRInt16 mSessionCntr ;      // low 16 bits, incremented per new record
  PRLock * m_Lock ;           // serializes db access (FUR: may be unneeded)
} ;
#endif // _NSIDBACCESSOR_H_

View File

@@ -0,0 +1,97 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
// FUR - Add overall description comment here
#include "nsDBEnumerator.h"
#include "nsDiskCacheRecord.h"
// Construct an enumerator over the db's records.  bReset starts PR_TRUE
// so the first HasMoreElements() call begins the walk from the start.
nsDBEnumerator::nsDBEnumerator(nsIDBAccessor* aDB, nsNetDiskCache* aCache) :
  m_DB(aDB) ,
  m_DiskCache(aCache) ,
  tempEntry(0) ,          // db-owned buffer fetched by HasMoreElements()
  tempEntry_length(0) ,
  m_CacheEntry(0) ,       // record object reused across GetNext() calls
  bReset(PR_TRUE)
{
  NS_INIT_REFCNT();
}
nsDBEnumerator::~nsDBEnumerator()
{
  // Drop the shared record object handed out by GetNext().
  NS_IF_RELEASE(m_CacheEntry) ;
}
//
// Implement nsISupports methods
//
NS_IMPL_ISUPPORTS(nsDBEnumerator, NS_GET_IID(nsIEnumerator))
/////////////////////////////////////////////////////////////////
// nsISimpleEnumerator methods
// Fetch the next record into tempEntry and report whether one exists.
// *_retval is PR_FALSE both at end-of-enumeration and (previously,
// silently) on db failure.
NS_IMETHODIMP
nsDBEnumerator::HasMoreElements(PRBool *_retval)
{
  *_retval = PR_FALSE ;
  // Fix: the EnumEntry() result was ignored, so a db error was
  // indistinguishable from a clean end of enumeration.
  nsresult rv = m_DB->EnumEntry(&tempEntry, &tempEntry_length, bReset) ;
  bReset = PR_FALSE ;
  if(NS_FAILED(rv))
    return rv ;
  if(tempEntry && tempEntry_length != 0)
    *_retval = PR_TRUE ;
  return NS_OK ;
}
// This routine does not create a new item per call.  It reuses a single
// nsDiskCacheRecord owned by the enumerator, refreshing it from the
// entry fetched by the preceding HasMoreElements() call.  Callers that
// need the record past the next GetNext() must duplicate it (copy
// constructor or some other dup function), and must release the
// reference handed out here when done.
NS_IMETHODIMP
nsDBEnumerator::GetNext(nsISupports **_retval)
{
  if(!m_CacheEntry) {
    // lazily create the shared record object on first use
    m_CacheEntry = new nsDiskCacheRecord(m_DB, m_DiskCache) ;
    if(m_CacheEntry)
      NS_ADDREF(m_CacheEntry) ;
    else
      return NS_ERROR_OUT_OF_MEMORY ;
  }
  if(!_retval)
    return NS_ERROR_NULL_POINTER ;
  *_retval = nsnull ;
  // overwrite the shared record with the entry HasMoreElements() fetched
  nsresult rv = m_CacheEntry->RetrieveInfo(tempEntry, tempEntry_length) ;
  if(NS_FAILED(rv))
    return rv ;
  *_retval = NS_STATIC_CAST(nsISupports*, m_CacheEntry) ;
  NS_ADDREF(*_retval) ; // all good getter addref
  return NS_OK ;
}

View File

@@ -0,0 +1,61 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
// FUR - Add overall description comment here
#ifndef _NS_DBENUMERATOR_H_
#define _NS_DBENUMERATOR_H_
#include "nsISimpleEnumerator.h"
#include "nsINetDataCacheRecord.h"
#include "nsIDBAccessor.h"
#include "nsCOMPtr.h"
#include "nsNetDiskCache.h"
#include "nsDiskCacheRecord.h"
class nsCachedDiskData ; /* forward decl */
// Enumerates the disk cache's records via nsIDBAccessor::EnumEntry,
// handing out one reused nsDiskCacheRecord object (see GetNext()).
class nsDBEnumerator : public nsISimpleEnumerator {
  public:
  NS_DECL_ISUPPORTS
  // FUR can use NS_DECL_NSISIMPLEENUMERATOR here
  /* boolean HasMoreElements (); */
  NS_IMETHOD HasMoreElements(PRBool *_retval) ;
  /* nsISupports GetNext (); */
  NS_IMETHOD GetNext(nsISupports **_retval) ;
  nsDBEnumerator(nsIDBAccessor* aDB, nsNetDiskCache* aCache) ;
  virtual ~nsDBEnumerator() ;
  // FUR all members should be prefixed by 'm', e.g. mbReset
  private:
  nsCOMPtr<nsIDBAccessor> m_DB ;
  nsCOMPtr<nsNetDiskCache> m_DiskCache ;
  void * tempEntry ;              // db-owned buffer from EnumEntry()
  PRUint32 tempEntry_length ;
  nsDiskCacheRecord* m_CacheEntry ;  // shared record reused by GetNext()
  PRBool bReset ;                 // true until the first EnumEntry() call
};
#endif // _NS_DBENUMERATOR_H_

View File

@@ -0,0 +1,451 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#include "nsDiskCacheRecord.h"
#include "nsINetDataDiskCache.h"
#include "nsNetDiskCacheCID.h"
#include "nsDiskCacheRecordChannel.h"
#include "nsFileStream.h"
#include "nsIComponentManager.h"
#include "nsIServiceManager.h"
#include "nsIProtocolHandler.h"
#include "nsIIOService.h"
#include "nsIAllocator.h"
#include "plstr.h"
#include "prprf.h"
#include "prmem.h"
#include "prlog.h"
#include "prtypes.h"
#include "netCore.h"
#include "nsDBAccessor.h"
#if !defined(IS_LITTLE_ENDIAN) && !defined(IS_BIG_ENDIAN)
ERROR! Must have a byte order
#endif
// COPY_INT32 moves one 32-bit value between memory and the serialized
// record-info buffer.  The serialized byte order is little-endian:
// little-endian hosts do a plain memcpy, big-endian hosts reverse the
// four bytes.
#ifdef IS_LITTLE_ENDIAN
#define COPY_INT32(_a,_b)  memcpy(_a, _b, sizeof(int32))
#else
#define COPY_INT32(_a,_b)  /* swap */                    \
    do {                                                 \
    ((char *)(_a))[0] = ((char *)(_b))[3];               \
    ((char *)(_a))[1] = ((char *)(_b))[2];               \
    ((char *)(_a))[2] = ((char *)(_b))[1];               \
    ((char *)(_a))[3] = ((char *)(_b))[0];               \
    } while(0)
#endif
// Construct an empty record bound to the db accessor and owning cache.
// State is filled in later by Init() (new record) or RetrieveInfo()
// (deserialized from the db).
nsDiskCacheRecord::nsDiskCacheRecord(nsIDBAccessor* db, nsNetDiskCache* aCache) :
  mKey(0) ,
  mKeyLength(0) ,
  mRecordID(0) ,
  mMetaData(0) ,
  mMetaDataLength(0) ,
  mDB(db) ,
  mInfo(0) ,            // serialized form, built by GenInfo()
  mInfoSize(0) ,
  mNumChannels(0) ,     // open channels; Delete() refuses while nonzero
  mDiskCache(aCache)
{
  NS_INIT_REFCNT();
}
// Initialize a new record for the given cache key: copy the key, obtain
// a recordID from the db, and derive the cache file path
// <cachedir>/<id%32 as 2 hex digits>/<id as 8 hex digits>.
// The key is copied, so the caller keeps ownership of its buffer.
NS_IMETHODIMP
nsDiskCacheRecord::Init(const char* key, PRUint32 length)
{
  NS_NewFileSpec(getter_AddRefs(mFile));
  if(!mFile)
    return NS_ERROR_OUT_OF_MEMORY ;
  // copy key
  mKeyLength = length ;
  mKey = NS_STATIC_CAST(char*, nsAllocator::Alloc(mKeyLength*sizeof(char))) ;
  if(!mKey)
    return NS_ERROR_OUT_OF_MEMORY ;
  memcpy(mKey, key, length) ;
  // get RecordID
  // FUR!! Another disk access ? If called from GetCachedData, ID is already known
  // Fix: the GetID() result was previously ignored (flagged by the
  // in-file review notes); a failed lookup left mRecordID stale.
  nsresult rv = mDB->GetID(key, length, &mRecordID) ;
  if(NS_FAILED(rv))
    return rv ;
  // setup the file name
  nsCOMPtr<nsIFileSpec> dbFolder ;
  mDiskCache->GetDiskCacheFolder(getter_AddRefs(dbFolder)) ;
  rv = mFile->FromFileSpec(dbFolder) ;
  if(NS_FAILED(rv))
    return NS_ERROR_FAILURE ;
  // dir is a hash result of mRecordID%32, hope it's enough
  char filename[9], dirName[3] ;
  PR_snprintf(dirName, 3, "%.2x", (((PRUint32)mRecordID) % 32)) ;
  mFile->AppendRelativeUnixPath(dirName) ;
  PR_snprintf(filename, 9, "%.8x", mRecordID) ;
  mFile->AppendRelativeUnixPath(filename) ;
  return NS_OK ;
}
// Release the record's owned buffers.
nsDiskCacheRecord::~nsDiskCacheRecord()
{
  if(mKey)
    nsAllocator::Free(mKey) ;
  if(mMetaData)
    nsAllocator::Free(mMetaData) ;
  // Fix: mInfo (the serialized buffer built by GenInfo/RetrieveInfo) was
  // never released here and leaked with every destroyed record.
  if(mInfo)
    nsAllocator::Free(mInfo) ;
}
//
// Implement nsISupports methods
//
NS_IMPL_ISUPPORTS(nsDiskCacheRecord, NS_GET_IID(nsINetDataCacheRecord))
///////////////////////////////////////////////////////////////////////
// nsINetDataCacheRecord methods
// Return a freshly allocated copy of the cache key; the caller owns the
// buffer in *_retval and must free it.
NS_IMETHODIMP
nsDiskCacheRecord::GetKey(PRUint32 *length, char** _retval)
{
  if(!_retval)
    return NS_ERROR_NULL_POINTER ;
  *length = mKeyLength ;
  *_retval = NS_STATIC_CAST(char*,
                            nsAllocator::Alloc(mKeyLength*sizeof(char))) ;
  if(!*_retval)
    return NS_ERROR_OUT_OF_MEMORY ;
  // hand back a private copy so callers cannot mutate our key
  memcpy(*_retval, mKey, mKeyLength) ;
  return NS_OK ;
}
// Return the 32-bit record id (session id in the high 16 bits, per-
// session counter in the low 16 -- see nsDBAccessor::GetID()).
NS_IMETHODIMP
nsDiskCacheRecord::GetRecordID(PRInt32* aRecordID)
{
  *aRecordID = mRecordID ;
  return NS_OK ;
}
// Return a freshly allocated copy of the metadata; caller owns and must
// free *_retval.  With no metadata, *_retval stays null and *length 0.
NS_IMETHODIMP
nsDiskCacheRecord::GetMetaData(PRUint32 *length, char **_retval)
{
  if(!_retval)
    return NS_ERROR_NULL_POINTER ;
  // always null the return value first
  *_retval = nsnull ;
  *length = mMetaDataLength ;
  if(0 == mMetaDataLength)
    return NS_OK ;
  char* copy =
    NS_STATIC_CAST(char*, nsAllocator::Alloc(mMetaDataLength*sizeof(char))) ;
  if(!copy)
    return NS_ERROR_OUT_OF_MEMORY ;
  memcpy(copy, mMetaData, mMetaDataLength) ;
  *_retval = copy ;
  return NS_OK ;
}
// Replace this record's metadata, re-serialize the record (GenInfo) and
// write it through to the db.
NS_IMETHODIMP
nsDiskCacheRecord::SetMetaData(PRUint32 length, const char* data)
{
  if(mMetaData) {
    nsAllocator::Free(mMetaData) ;
    // Fix: mMetaData was left dangling if the allocation below failed,
    // inviting a double free in the destructor.
    mMetaData = nsnull ;
  }
  mMetaDataLength = length ;
  if(length) {
    mMetaData = NS_STATIC_CAST(char*, nsAllocator::Alloc(length*sizeof(char))) ;
    if(!mMetaData) {
      mMetaDataLength = 0 ;   // keep length consistent with the buffer
      return NS_ERROR_OUT_OF_MEMORY ;
    }
    memcpy(mMetaData, data, length) ;
  }
  // Fix: zero-length metadata no longer depends on Alloc(0) returning
  // non-null (it was previously misreported as out-of-memory).
  // Generate mInfo
  nsresult rv = GenInfo() ;
  if(NS_FAILED(rv))
    return rv ;
  // write through into mDB
  rv = mDB->Put(mRecordID, mInfo, mInfoSize) ;
  // FUR - mInfo leaking ?
  return rv ;
}
// The stored content length is simply the cache file's current size.
NS_IMETHODIMP
nsDiskCacheRecord::GetStoredContentLength(PRUint32 *aStoredContentLength)
{
  return mFile->GetFileSize(aStoredContentLength) ;
}
// Shrink the cache file to the given length.  Growing is rejected: the
// length may only be set to something at or below the current file size.
NS_IMETHODIMP
nsDiskCacheRecord::SetStoredContentLength(PRUint32 aStoredContentLength)
{
  PRUint32 currentSize = 0 ;
  nsresult rv = mFile->GetFileSize(&currentSize) ;
  if(NS_FAILED(rv))
    return rv ;
  if(aStoredContentLength > currentSize) {
    NS_ERROR("Error: can not set filesize to something bigger than itself.\n") ;
    return NS_ERROR_FAILURE ;
  }
  return mFile->Truncate(aStoredContentLength) ;
}
// Delete the record: remove its cache file, credit the cache's storage
// accounting, and drop both db entries.  Refuses while channels are
// still open on this record.
NS_IMETHODIMP
nsDiskCacheRecord::Delete(void)
{
  if(mNumChannels)
    return NS_ERROR_NOT_AVAILABLE ;
  PRUint32 len ;
  // NOTE(review): GetFileSize failure would leave len uninitialized
  // before the accounting update below -- TODO confirm it cannot fail here.
  mFile->GetFileSize(&len) ;
  nsFileSpec cache_file ;
  nsresult rv = mFile->GetFileSpec(&cache_file) ;
  if(NS_FAILED(rv))
    return NS_ERROR_FAILURE ;
  cache_file.Delete(PR_TRUE) ;
  // update the storage accounting by the bytes just reclaimed
  mDiskCache->m_StorageInUse -= len ;
  rv = mDB->Del(mRecordID, mKey, mKeyLength) ;
  if(NS_FAILED(rv))
    return NS_ERROR_FAILURE ;
  else
    return NS_OK ;
}
// Hand out an addref'ed reference to the record's cache file spec.
NS_IMETHODIMP
nsDiskCacheRecord::GetFilename(nsIFileSpec * *aFilename)
{
  if(!aFilename)
    return NS_ERROR_NULL_POINTER ;
  nsIFileSpec* spec = mFile ;
  NS_ADDREF(spec) ;
  *aFilename = spec ;
  return NS_OK ;
}
// Create a channel over this record's cache file.  The returned channel
// is addref'ed for the caller.
NS_IMETHODIMP
nsDiskCacheRecord::NewChannel(nsILoadGroup *loadGroup, nsIChannel **_retval)
{
  nsDiskCacheRecordChannel* channel = new nsDiskCacheRecordChannel(this, loadGroup) ;
  if(!channel)
    return NS_ERROR_OUT_OF_MEMORY ;
  nsresult rv = channel->Init() ;
  if(NS_FAILED(rv)) {
    // Fix: the freshly new'ed channel (not yet addref'ed) was leaked on
    // Init() failure.
    delete channel ;
    return rv ;
  }
  NS_ADDREF(channel) ;
  *_retval = NS_STATIC_CAST(nsIChannel*, channel) ;
  return NS_OK ;
}
//////////////////////////////////////////////////////////////////////////
// nsDiskCacheRecord methods
// file name is represented by a url string. I hope this would be more
// generic
// Serialize this record into a contiguous buffer (mInfo/mInfoSize).
// Layout: PRUint32 total-size | PRInt32 recordID | PRUint32 keyLen |
// key bytes | PRUint32 metaLen | meta bytes | PRUint32 urlLen (incl.
// NUL) | url bytes.  The file name is stored as a URL string to stay
// platform-generic.  All integers are written via COPY_INT32
// (little-endian on disk).
nsresult
nsDiskCacheRecord::GenInfo()
{
  if(mInfo) {
    nsAllocator::Free(mInfo) ;
    // Fix: mInfo was left dangling here; if the Alloc below failed, a
    // later GenInfo/RetrieveInfo call would free it a second time.
    mInfo = nsnull ;
    mInfoSize = 0 ;
  }
  char* file_url=nsnull ;
  PRUint32 name_len ;
  mFile->GetURLString(&file_url) ;
  // NOTE(review): file_url is presumably allocated by GetURLString and
  // not freed here -- TODO confirm ownership.
  name_len = PL_strlen(file_url)+1 ;
  mInfoSize = sizeof(PRUint32) ; // checksum for mInfoSize
  mInfoSize += sizeof(PRInt32) ; // RecordID
  mInfoSize += sizeof(PRUint32) ; // key length
  mInfoSize += mKeyLength ; // key
  mInfoSize += sizeof(PRUint32) ; // metadata length
  mInfoSize += mMetaDataLength ; // metadata
  mInfoSize += sizeof(PRUint32) ; // filename length
  mInfoSize += name_len ; // filename
  void* newInfo = nsAllocator::Alloc(mInfoSize*sizeof(char)) ;
  if(!newInfo) {
    mInfoSize = 0 ;
    return NS_ERROR_OUT_OF_MEMORY ;
  }
  // copy the checksum mInfoSize
  char* cur_ptr = NS_STATIC_CAST(char*, newInfo) ;
  COPY_INT32(cur_ptr, &mInfoSize) ;
  cur_ptr += sizeof(PRUint32) ;
  // copy RecordID
  COPY_INT32(cur_ptr, &mRecordID) ;
  cur_ptr += sizeof(PRInt32) ;
  // copy key length
  COPY_INT32(cur_ptr, &mKeyLength) ;
  cur_ptr += sizeof(PRUint32) ;
  // copy key
  memcpy(cur_ptr, mKey, mKeyLength) ;
  cur_ptr += mKeyLength ;
  // copy metadata length
  COPY_INT32(cur_ptr, &mMetaDataLength) ;
  cur_ptr += sizeof(PRUint32) ;
  // copy metadata
  memcpy(cur_ptr, mMetaData, mMetaDataLength) ;
  cur_ptr += mMetaDataLength ;
  // copy file name length
  COPY_INT32(cur_ptr, &name_len) ;
  cur_ptr += sizeof(PRUint32) ;
  // copy file name
  memcpy(cur_ptr, file_url, name_len) ;
  cur_ptr += name_len ;
  PR_ASSERT(cur_ptr == NS_STATIC_CAST(char*, newInfo) + mInfoSize);
  mInfo = newInfo ;
  return NS_OK ;
}
/*
 * Deserialize a record from the buffer GenInfo() produced, overwriting
 * every member of this object.  The layout must mirror GenInfo():
 * total-size | recordID | keyLen | key | metaLen | meta | urlLen | url.
 * Only minimal error checking is performed (the leading size word is
 * validated against aInfoLength).
 */
NS_IMETHODIMP
nsDiskCacheRecord::RetrieveInfo(void* aInfo, PRUint32 aInfoLength)
{
  // reset everything owned by the previous contents
  if(mInfo) {
    nsAllocator::Free(mInfo) ;
    mInfo = nsnull ;
  }
  if(mKey) {
    nsAllocator::Free(mKey) ;
    mKey = nsnull ;
  }
  if(mMetaData) {
    nsAllocator::Free(mMetaData) ;
    mMetaData = nsnull ;
  }
  char * cur_ptr = NS_STATIC_CAST(char*, aInfo) ;
  char* file_url ;
  PRUint32 name_len ;
  // set mInfoSize
  COPY_INT32(&mInfoSize, cur_ptr) ;
  cur_ptr += sizeof(PRUint32) ;
  // validate the embedded size against what the caller handed us
  if(mInfoSize != aInfoLength)
    return NS_ERROR_FAILURE ;
  // set mRecordID
  COPY_INT32(&mRecordID, cur_ptr) ;
  cur_ptr += sizeof(PRInt32) ;
  // set mKeyLength
  COPY_INT32(&mKeyLength, cur_ptr) ;
  cur_ptr += sizeof(PRUint32) ;
  // set mKey
  mKey = NS_STATIC_CAST(char*, nsAllocator::Alloc(mKeyLength*sizeof(char))) ;
  if(!mKey)
    return NS_ERROR_OUT_OF_MEMORY ;
  memcpy(mKey, cur_ptr, mKeyLength) ;
  cur_ptr += mKeyLength ;
  // cross-check: the key must map back to the recordID we just read
  PRInt32 id ;
  mDB->GetID(mKey, mKeyLength, &id) ;
  NS_ASSERTION(id==mRecordID, "\t ++++++ bad record, somethings wrong\n") ;
  // set mMetaDataLength
  COPY_INT32(&mMetaDataLength, cur_ptr) ;
  cur_ptr += sizeof(PRUint32) ;
  // set mMetaData
  mMetaData = NS_STATIC_CAST(char*, nsAllocator::Alloc(mMetaDataLength*sizeof(char))) ;
  if(!mMetaData)
    return NS_ERROR_OUT_OF_MEMORY ;
  memcpy(mMetaData, cur_ptr, mMetaDataLength) ;
  cur_ptr += mMetaDataLength ;
  // get mFile name length
  COPY_INT32(&name_len, cur_ptr) ;
  cur_ptr += sizeof(PRUint32) ;
  // get mFile native name (stored as a URL string, NUL included)
  // NOTE(review): file_url looks like it is never freed after SetURLString
  // below -- TODO confirm ownership.
  file_url = NS_STATIC_CAST(char*, nsAllocator::Alloc(name_len*sizeof(char))) ;
  if(!file_url)
    return NS_ERROR_OUT_OF_MEMORY ;
  memcpy(file_url, cur_ptr, name_len) ;
  cur_ptr += name_len ;
  PR_ASSERT(cur_ptr == NS_STATIC_CAST(char*, aInfo) + mInfoSize);
  // create mFile if Init() isn't called
  if(!mFile) {
    NS_NewFileSpec(getter_AddRefs(mFile));
    if(!mFile)
      return NS_ERROR_OUT_OF_MEMORY ;
  }
  // setup mFile
  mFile->SetURLString(file_url) ;
  return NS_OK ;
}

View File

@@ -0,0 +1,70 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#ifndef _NET_CACHEDDISKDATA_H_
#define _NET_CACHEDDISKDATA_H_
#include "nsINetDataCacheRecord.h"
#include "nsCOMPtr.h"
#include "nsIDBAccessor.h"
#include "prtypes.h"
#include "nsILoadGroup.h"
#include "nsIFileChannel.h"
#include "nsNetDiskCache.h"
// One disk-cache record: a cache key, its metadata, and the on-disk
// cache file holding the content.  Serialized to/from the db via
// GenInfo()/RetrieveInfo().  Construction is restricted to the friend
// classes (cache, enumerator, channel).
class nsDiskCacheRecord : public nsINetDataCacheRecord
{
  public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSINETDATACACHERECORD
  protected:
  nsDiskCacheRecord(nsIDBAccessor* db, nsNetDiskCache* aCache) ;
  virtual ~nsDiskCacheRecord() ;
  // Overwrite this record's state from a serialized buffer.
  NS_IMETHOD RetrieveInfo(void* aInfo, PRUint32 aInfoLength) ;
  // Initialize a fresh record for the given key.
  NS_IMETHOD Init(const char* key, PRUint32 length) ;
  // Build the serialized form into mInfo/mInfoSize.
  nsresult GenInfo(void) ;
  private:
  char* mKey ;               // cache key bytes (not NUL-terminated)
  PRUint32 mKeyLength ;
  PRInt32 mRecordID ;        // 32-bit id minted by nsDBAccessor::GetID
  char* mMetaData ;
  PRUint32 mMetaDataLength ;
  nsCOMPtr<nsIFileSpec> mFile ;   // the record's content file on disk
  nsCOMPtr<nsIDBAccessor> mDB ;
  void* mInfo ;              // serialized record, owned by this object
  PRUint32 mInfoSize ;
  PRUint32 mNumChannels ;    // open channels; Delete() refuses while > 0
  nsCOMPtr<nsNetDiskCache> mDiskCache ;
  friend class nsDiskCacheRecordChannel ;
  friend class nsDBEnumerator ;
  friend class nsNetDiskCache ;
} ;
#endif // _NET_CACHEDDISKDATA_H_

View File

@@ -0,0 +1,392 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#include "nsDiskCacheRecordChannel.h"
//#include "nsFileTransport.h"
#include "nsIIOService.h"
#include "nsIServiceManager.h"
#include "nsIOutputStream.h"
static NS_DEFINE_CID(kIOServiceCID, NS_IOSERVICE_CID);
// This is copied from nsMemCacheChannel; we should consolidate the two.
// Wraps the transport's output stream so every successful write also
// updates the disk cache's storage accounting via the owning channel.
class WriteStreamWrapper : public nsIOutputStream
{
  public:
  WriteStreamWrapper(nsDiskCacheRecordChannel* aChannel,
                     nsIOutputStream *aBaseStream) ;
  virtual ~WriteStreamWrapper() ;
  // Create an addref'ed wrapper around aBaseStream.
  static nsresult
  Create(nsDiskCacheRecordChannel* aChannel, nsIOutputStream *aBaseStream, nsIOutputStream* *aWrapper) ;
  NS_DECL_ISUPPORTS
  NS_DECL_NSIBASESTREAM
  NS_DECL_NSIOUTPUTSTREAM
  private:
  nsDiskCacheRecordChannel* mChannel;     // strong ref, managed manually
  nsCOMPtr<nsIOutputStream> mBaseStream;  // the wrapped transport stream
} ;
// implement nsISupports
NS_IMPL_ISUPPORTS(WriteStreamWrapper, NS_GET_IID(nsIOutputStream))
// mChannel is a raw pointer, so the strong reference is taken and
// released by hand here and in the destructor.
WriteStreamWrapper::WriteStreamWrapper(nsDiskCacheRecordChannel* aChannel,
                                       nsIOutputStream *aBaseStream)
  : mChannel(aChannel), mBaseStream(aBaseStream)
{
  NS_INIT_REFCNT();
  NS_ADDREF(mChannel);
}
WriteStreamWrapper::~WriteStreamWrapper()
{
  NS_RELEASE(mChannel);
}
// Factory helper: hand the caller an addref'ed wrapper around aBaseStream.
nsresult
WriteStreamWrapper::Create(nsDiskCacheRecordChannel* aChannel,
                           nsIOutputStream* aBaseStream,
                           nsIOutputStream** aWrapper)
{
  WriteStreamWrapper* instance = new WriteStreamWrapper(aChannel, aBaseStream);
  if (!instance)
    return NS_ERROR_OUT_OF_MEMORY;
  NS_ADDREF(instance);
  *aWrapper = instance;
  return NS_OK;
}
// Delegate the write, then report the number of bytes that actually
// landed so the cache's storage accounting stays current.  The
// notification runs even on failure; *aNumWritten carries whatever the
// underlying stream reported.
NS_IMETHODIMP
WriteStreamWrapper::Write(const char *aBuffer, PRUint32 aCount, PRUint32 *aNumWritten)
{
  *aNumWritten = 0;
  nsresult rv = mBaseStream->Write(aBuffer, aCount, aNumWritten);
  mChannel->NotifyStorageInUse(*aNumWritten);
  return rv;
}
// Flush and Close simply delegate to the wrapped stream.
NS_IMETHODIMP
WriteStreamWrapper::Flush()
{
  return mBaseStream->Flush();
}
NS_IMETHODIMP
WriteStreamWrapper::Close()
{
  return mBaseStream->Close();
}
// The channel counts itself against the record's open-channel count so
// nsDiskCacheRecord::Delete() refuses while any channel is alive.
nsDiskCacheRecordChannel::nsDiskCacheRecordChannel(nsDiskCacheRecord *aRecord,
                                                   nsILoadGroup *aLoadGroup)
  : mRecord(aRecord) ,
    mLoadGroup(aLoadGroup)
{
  NS_INIT_REFCNT() ;
  mRecord->mNumChannels++ ;
}
nsDiskCacheRecordChannel::~nsDiskCacheRecordChannel()
{
  mRecord->mNumChannels-- ;
}
// FUR!!
//
// I know that I gave conflicting advice on the issue of file
// transport versus file protocol handler, but I thought that the
// last word was that we would use the raw transport, when I wrote:
//
// > I just thought of an argument for the other side of the coin, i.e. the
// > benefit of *not* reusing the file protocol handler: On the Mac, it's
// > expensive to convert from a string URL to an nsFileSpec, because the Mac
// > is brain-dead and scans every directory on the path to the file. It's
// > cheaper to create a nsFileSpec for a cache file by combining a single,
// > static nsFileSpec that corresponds to the cache directory with the
// > relative path to the cache file (using nsFileSpec's operator +). This
// > operation is optimized on the Mac to avoid the scanning operation.
//
// The Mac guys will eat us alive if we do path string to nsFileSpec
// conversions for every cache file we open.
// Build a file-protocol channel over the record's cache file (via its
// URL string); all nsIChannel work is then delegated to this underlying
// transport (mFileTransport).
nsresult
nsDiskCacheRecordChannel::Init(void)
{
  char* urlStr ;
  mRecord->mFile->GetURLString(&urlStr) ;
  // NOTE(review): urlStr is presumably heap-allocated by GetURLString and
  // never freed here -- TODO confirm ownership.
  nsresult rv ;
  NS_WITH_SERVICE(nsIIOService, serv, kIOServiceCID, &rv);
  if (NS_FAILED(rv)) return rv;
  rv = serv->NewChannel("load", // XXX what should this be?
                        urlStr,
                        nsnull, // no base uri
                        mLoadGroup,
                        nsnull, // no eventsink getter
                        0,
                        nsnull, // no original URI
                        0,
                        0,
                        getter_AddRefs(mFileTransport));
  return rv ;
}
// Adjust the cache's global storage accounting by aBytesUsed (negative
// values credit reclaimed space).
nsresult
nsDiskCacheRecordChannel::NotifyStorageInUse(PRInt32 aBytesUsed)
{
  // Fix: the updated byte count was previously returned as the nsresult,
  // so any nonzero storage total read as a failure code.
  mRecord->mDiskCache->m_StorageInUse += aBytesUsed ;
  return NS_OK ;
}
// implement nsISupports
NS_IMPL_ISUPPORTS(nsDiskCacheRecordChannel, NS_GET_IID(nsIChannel))
// implement nsIRequest -- all four methods delegate to the underlying
// file transport.  IsPending treats a missing transport as "not
// pending"; the others report failure.
NS_IMETHODIMP
nsDiskCacheRecordChannel::IsPending(PRBool *aIsPending)
{
  *aIsPending = PR_FALSE ;
  if(!mFileTransport)
    return NS_OK ;
  return mFileTransport->IsPending(aIsPending) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::Cancel(void)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->Cancel() ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::Suspend(void)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->Suspend() ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::Resume(void)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->Resume() ;
}
// implement nsIChannel -- URI and input stream come straight from the
// underlying file transport.
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetURI(nsIURI * *aURI)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->GetURI(aURI) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::OpenInputStream(PRUint32 aStartPosition,
                                          PRInt32 aReadCount,
                                          nsIInputStream* *aResult)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->OpenInputStream(aStartPosition,
                                         aReadCount,
                                         aResult) ;
}
// Open an output stream at startPosition.  Writing before the current
// end truncates the file there first (crediting the storage accounting
// for the reclaimed bytes), and the returned stream is wrapped so each
// write updates the accounting.
NS_IMETHODIMP
nsDiskCacheRecordChannel::OpenOutputStream(PRUint32 startPosition,
                                           nsIOutputStream* *aResult)
{
  nsresult rv ;
  NS_ENSURE_ARG(aResult) ;
  // Fix: every sibling method guards against a missing transport, but
  // this one dereferenced mFileTransport unconditionally below.
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  nsCOMPtr<nsIOutputStream> outputStream ;
  PRUint32 oldLength ;
  mRecord->GetStoredContentLength(&oldLength) ;
  if(startPosition < oldLength) {
    // shrinking: pass the (negative) delta to the accounting
    NotifyStorageInUse(startPosition - oldLength) ;
    // truncate the file at the new write position
    rv = mRecord->SetStoredContentLength(startPosition) ;
    if(NS_FAILED(rv)) {
      // Fix: printf debugging replaced with NS_WARNING.
      NS_WARNING("nsDiskCacheRecordChannel: failed to truncate cache file") ;
      return rv ;
    }
  }
  rv = mFileTransport->OpenOutputStream(startPosition, getter_AddRefs(outputStream)) ;
  if(NS_FAILED(rv)) return rv ;
  return WriteStreamWrapper::Create(this, outputStream, aResult) ;
}
// AsyncOpen and AsyncRead delegate to the file transport.
NS_IMETHODIMP
nsDiskCacheRecordChannel::AsyncOpen(nsIStreamObserver *observer,
                                    nsISupports *ctxt)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->AsyncOpen(observer, ctxt) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::AsyncRead(PRUint32 aStartPosition,
                                    PRInt32 aReadCount,
                                    nsISupports *aContext,
                                    nsIStreamListener *aListener)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->AsyncRead(aStartPosition ,
                                   aReadCount ,
                                   aContext ,
                                   aListener) ;
}
// AsyncWrite is deliberately unimplemented: a delegated async write
// would bypass WriteStreamWrapper, so the bytes written could not be
// counted against the cache's storage accounting.
NS_IMETHODIMP
nsDiskCacheRecordChannel::AsyncWrite(nsIInputStream *fromStream,
                                     PRUint32 startPosition,
                                     PRInt32 writeCount,
                                     nsISupports *ctxt,
                                     nsIStreamObserver *observer)
{
  /*
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->AsyncWrite(fromStream,
                                    startPosition,
                                    writeCount,
                                    ctxt,
                                    observer) ;
  */
  // I can't do this since the write is not monitored, and I won't be
  // able to update the storage.
  return NS_ERROR_NOT_IMPLEMENTED;
}
// Load attributes, content type and content length all delegate to the
// underlying file transport.
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetLoadAttributes(nsLoadFlags *aLoadAttributes)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->GetLoadAttributes(aLoadAttributes) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::SetLoadAttributes(nsLoadFlags aLoadAttributes)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->SetLoadAttributes(aLoadAttributes) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetContentType(char * *aContentType)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->GetContentType(aContentType) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetContentLength(PRInt32 *aContentLength)
{
  if(!mFileTransport)
    return NS_ERROR_FAILURE ;
  return mFileTransport->GetContentLength(aContentLength) ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetOwner(nsISupports* *aOwner)
{
*aOwner = mOwner.get() ;
NS_IF_ADDREF(*aOwner) ;
return NS_OK ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::SetOwner(nsISupports* aOwner)
{
mOwner = aOwner ;
return NS_OK ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetOriginalURI(nsIURI* *aURI)
{
// FUR - might need to implement this - not sure
return NS_ERROR_NOT_IMPLEMENTED ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetLoadGroup(nsILoadGroup* *aLoadGroup)
{
  // Not required to be implemented, since it is implemented by cache manager.
  // Asserts in debug builds if reached; note that *aLoadGroup is left
  // unset even though NS_OK is returned — callers must not rely on it.
  NS_ASSERTION(0, "nsDiskCacheRecordChannel method unexpectedly called");
  return NS_OK ;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::SetLoadGroup(nsILoadGroup* aLoadGroup)
{
  // Not required to be implemented, since it is implemented by cache manager.
  // Asserts in debug builds if reached; the argument is ignored.
  NS_ASSERTION(0, "nsDiskCacheRecordChannel method unexpectedly called");
  return NS_OK;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::GetNotificationCallbacks(nsIInterfaceRequestor* *aNotificationCallbacks)
{
  // Not required to be implemented, since it is implemented by cache manager.
  // Unlike Get/SetLoadGroup above, this stub does report failure.
  NS_ASSERTION(0, "nsDiskCacheRecordChannel method unexpectedly called");
  return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHODIMP
nsDiskCacheRecordChannel::SetNotificationCallbacks(nsIInterfaceRequestor* aNotificationCallbacks)
{
  // Not required to be implemented, since it is implemented by cache manager.
  NS_ASSERTION(0, "nsDiskCacheRecordChannel method unexpectedly called");
  return NS_ERROR_NOT_IMPLEMENTED;
}

View File

@@ -0,0 +1,65 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#ifndef _ns_DiskCacheRecordChannel_h_
#define _ns_DiskCacheRecordChannel_h_
#include "nsIChannel.h"
#include "nsCOMPtr.h"
#include "nsDiskCacheRecord.h"
/*
 * nsIChannel implementation that exposes a single disk-cache record's
 * content as a channel.  Reads are delegated to an internal file
 * transport over the record's data file; writes are routed through a
 * wrapper so storage accounting can be kept up to date.
 * This class is plagiarized from nsMemCacheChannel.
 */
class nsDiskCacheRecordChannel : public nsIChannel
{
public:
  // Binds the channel to one cache record; the load group is remembered
  // but load-group management is handled by the cache manager.
  nsDiskCacheRecordChannel(nsDiskCacheRecord *aRecord, nsILoadGroup *aLoadGroup);
  virtual ~nsDiskCacheRecordChannel() ;
  // Declare nsISupports methods
  NS_DECL_ISUPPORTS
  // Declare nsIRequest methods
  NS_DECL_NSIREQUEST
  // Declare nsIChannel methods
  NS_DECL_NSICHANNEL
  // Second-phase construction; must be called before using the channel.
  nsresult Init(void) ;
private:
  // Reports aBytesUsed of additional storage consumption to the cache.
  nsresult NotifyStorageInUse(PRInt32 aBytesUsed) ;
  nsCOMPtr<nsDiskCacheRecord> mRecord ;        // the record being served
  nsCOMPtr<nsILoadGroup>      mLoadGroup ;
  nsCOMPtr<nsISupports>       mOwner ;         // see Get/SetOwner
  nsCOMPtr<nsIChannel>        mFileTransport ; // transport over the data file
  // Wrapper needs access to NotifyStorageInUse() to meter writes.
  friend class WriteStreamWrapper ;
} ;
#endif // _ns_DiskCacheRecordChannel_h_

View File

@@ -0,0 +1,60 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#ifndef _NS_IDBACCESSOR_H_
#define _NS_IDBACCESSOR_H_
#include "nsISupports.h"
#include "nsIFileSpec.h"
// nsIDBAccessorIID {6AADD4D0-7785-11d3-87FE-000629D01344}
#define NS_IDBACCESSOR_IID \
{ 0x6aadd4d0, 0x7785, 0x11d3, \
  {0x87, 0xfe, 0x0, 0x6, 0x29, 0xd0, 0x13, 0x44}}
// nsDBAccessorCID {6AADD4D1-7785-11d3-87FE-000629D01344}
#define NS_DBACCESSOR_CID \
{ 0x6aadd4d1, 0x7785, 0x11d3, \
  { 0x87, 0xfe, 0x0, 0x6, 0x29, 0xd0, 0x13, 0x44 }}
// Abstract accessor for the key/value database backing the disk cache.
// Records are opaque byte blobs addressed by a PRInt32 ID; GetID() maps
// an opaque cache key to an ID.  Memory ownership of the blobs returned
// by Get()/EnumEntry() is implementation-defined — TODO confirm against
// nsDBAccessor before relying on it.
class nsIDBAccessor : public nsISupports
{
public:
  NS_DEFINE_STATIC_IID_ACCESSOR(NS_IDBACCESSOR_IID)
  // Open/create the database in DBFile.
  NS_IMETHOD Init(nsIFileSpec* DBFile) = 0 ;
  // Flush and close the database.
  NS_IMETHOD Shutdown(void) = 0 ;
  // Store/fetch/delete the blob for record aID.
  NS_IMETHOD Put(PRInt32 aID, void* anEntry, PRUint32 aLength) = 0 ;
  NS_IMETHOD Get(PRInt32 aID, void** anEntry, PRUint32 *aLength) = 0 ;
  NS_IMETHOD Del(PRInt32 aID, void* anEntry, PRUint32 aLength) = 0 ;
  // Map an opaque key (arbitrary bytes, explicit length) to a record ID.
  NS_IMETHOD GetID(const char* key, PRUint32 length, PRInt32* aID) = 0 ;
  // Iterate over all entries; bReset restarts the enumeration.
  NS_IMETHOD EnumEntry(void* *anEntry, PRUint32* aLength, PRBool bReset) = 0 ;
} ;
#endif // _NS_IDBACCESSOR_H_

View File

@@ -0,0 +1,691 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#include "nsNetDiskCache.h"
#include "nscore.h"
#include "plstr.h"
#include "prprf.h"
#include "prtypes.h"
#include "prio.h"
#include "prsystem.h" // Directory Seperator
#include "plhash.h"
#include "prclist.h"
#include "prmem.h"
#include "nsIComponentManager.h"
#include "nsIServiceManager.h"
#include "nsIPref.h"
#include "mcom_db.h"
#include "nsDBEnumerator.h"
#include "nsDiskCacheRecord.h"
static NS_DEFINE_CID(kPrefCID, NS_PREF_CID) ;
static NS_DEFINE_CID(kDBAccessorCID, NS_DBACCESSOR_CID) ;
static const PRUint32 DISK_CACHE_SIZE_DEFAULT = 5*1024*1024 ; // 5MB
static const char * const DISK_CACHE_PREF = "browser.cache.disk_cache_size";
static const char * const CACHE_DIR_PREF = "browser.cache.directory";
class nsDiskCacheRecord ;
// Construct a disabled-by-default-off disk cache object.  Real setup
// (folder, sub-directories, database) happens later in Init().
nsNetDiskCache::nsNetDiskCache() :
  m_Enabled(PR_TRUE) ,
  m_NumEntries(0) ,
  m_pNextCache(0) ,
  m_pDiskCacheFolder(0) ,
  m_StorageInUse(0) ,
  m_DB(0) ,
  m_BaseDirNum(32)    // 32 == "no dirs have been renamed for recovery yet"
{
  // set it to INF for now
  m_MaxEntries = (PRUint32)-1 ;
  NS_INIT_REFCNT();
}
// Destructor: releases the db accessor and removes any directories that
// were set aside by RenameCacheSubDirs() during db recovery.
nsNetDiskCache::~nsNetDiskCache()
{
  printf("~nsNetDiskCache\n") ;   // debug leftover — remove before shipping
  NS_IF_RELEASE(m_DB) ;
  // FUR!!
  // You shouldn't rely on the value of m_BaseDirNum to diagnose whether or not
  // a cache corruption has occurred since it's possible that the app does not
  // shut down cleanly and a corrupted cache has still not been cleaned up from
  // a previous session. My suggestion is that you pick a different scheme for
  // renaming the dirs, e.g. rename them as "trash*" and remove all directories
  // with this name pattern on shutdown.
  // FUR
  // I think that, eventually, we also want a distinguished key in the DB which
  // means "clean cache shutdown". You clear this flag when the db is first
  // opened and set it just before the db is closed. If the db wasn't shutdown
  // cleanly in a prior session, i.e. because the app crashed, on startup you
  // scan all the individual files in directories and look for "orphans",
  // i.e. cache files which don't have corresponding entries in the db. That's
  // also when storage-in-use and number of entries would be recomputed.
  //
  // We don't necessarily need all this functionality immediately, though.
  // m_BaseDirNum > 32 means RenameCacheSubDirs() ran at least once this
  // session; delete the renamed (old, possibly corrupt) dirs 0x20..base.
  if(m_BaseDirNum > 32)
    RemoveDirs(32) ;
}
// One-time setup: pick the cache folder, create the db accessor, open
// the database, and recompute entry/storage statistics.  On a db-open
// failure a single recovery attempt (DBRecovery) is made.
//
// FUR!! (review notes kept from the branch)
// Pref reading does not really belong here — it presupposes the cache is
// embedded in an app with prefs.  It belongs in the application or the
// I/O manager.  Also, Init() needs to be lazy (MaybeInit() at the top of
// every public method) since the folder name is not set on startup.
NS_IMETHODIMP
nsNetDiskCache::Init(void)
{
  nsresult rv ;

  NS_WITH_SERVICE(nsIPref, pref, kPrefCID, &rv) ;
  if (NS_FAILED(rv))
    NS_ERROR("Failed to get global preference!\n") ;   // was misspelled "globle"

  rv = NS_NewFileSpec(getter_AddRefs(m_pDiskCacheFolder));
  if (!m_pDiskCacheFolder) {
    NS_ERROR("ERROR: Could not make a file spec.\n") ;
    return NS_ERROR_OUT_OF_MEMORY ;
  }

  // The CACHE_DIR_PREF lookup is still disabled; with or without a pref
  // service both paths fall back to /tmp (unix-only placeholder).
  if (pref) {
    /*
    char* dirPref = 0 ;
    rv = pref->CopyCharPref(CACHE_DIR_PREF, &dirPref) ;
    if (NS_SUCCEEDED(rv)) {
      m_pDiskCacheFolder->SetUnixStyleFilePath(dirPref) ;
      PR_Free(dirPref) ;
    } else
    */
    m_pDiskCacheFolder->SetUnixStyleFilePath("/tmp") ;
    printf("using default folder, /tmp\n") ;
  }
  else {
    // temp hack for now. change later for other platform
    m_pDiskCacheFolder->SetUnixStyleFilePath("/tmp") ;
  }

  // Replace any previous accessor.
  // FUR - suggest you use nsCOMPtr for m_DB - it will eliminate
  // manual addref/release and reduce likelihood of bugs
  NS_IF_RELEASE(m_DB) ;
  m_DB = new nsDBAccessor() ;
  if (!m_DB)
    return NS_ERROR_OUT_OF_MEMORY ;
  NS_ADDREF(m_DB) ;

  rv = InitDB() ;
  if (rv == NS_ERROR_FAILURE) {
    // One-shot corruption recovery; DBRecovery() re-runs InitDB() and
    // UpdateInfo() itself, so just propagate its result.
    return DBRecovery() ;
  }
  // The original fell through to UpdateInfo() on any other InitDB()
  // failure; propagate the error instead.
  if (NS_FAILED(rv))
    return rv ;
  return UpdateInfo() ;
}
// Create the 32 hashed cache sub-directories ("00".."1f") beneath the
// cache folder, then point m_DBFile at <folder>/cache.db and open it.
// Returns the db accessor's Init() result; directory-creation failures
// inside the loop are surfaced, and (fixing an unchecked FUR note) both
// NS_NewFileSpec() calls are now checked.
NS_IMETHODIMP
nsNetDiskCache::InitDB(void)
{
  nsresult rv ;
  nsCOMPtr<nsIFileSpec> cacheSubDir;
  rv = NS_NewFileSpec(getter_AddRefs(cacheSubDir));
  if (NS_FAILED(rv) || !cacheSubDir)
    return NS_ERROR_OUT_OF_MEMORY ;

  // FUR - any way to avoid doing this, if it's already been done ?
  // (CreateDir() is a no-op for directories that already exist.)
  for (int i = 0; i < 32; i++) {
    rv = cacheSubDir->FromFileSpec(m_pDiskCacheFolder) ;
    if (NS_FAILED(rv))
      return rv ;
    char dirName[3];                       // two hex digits + NUL
    PR_snprintf (dirName, 3, "%0.2x", i);
    cacheSubDir->AppendRelativeUnixPath (dirName) ;
    CreateDir(cacheSubDir);
  }

  rv = NS_NewFileSpec(getter_AddRefs(m_DBFile)) ;
  if (NS_FAILED(rv) || !m_DBFile)          // was unchecked (FUR note)
    return NS_ERROR_OUT_OF_MEMORY ;
  rv = m_DBFile->FromFileSpec(m_pDiskCacheFolder) ;
  if (NS_FAILED(rv))
    return rv ;
  m_DBFile->AppendRelativeUnixPath("cache.db") ;
  return m_DB->Init(m_DBFile) ;
}
//////////////////////////////////////////////////////////////////////////
// nsISupports methods
// FUR - Suggest you use NS_IMPL_ISUPPORTS3() macro instead
// QueryInterface: supports nsINetDataDiskCache, its base interface
// nsINetDataCache, and nsISupports.  Per the XPCOM contract the out
// parameter is now nulled on NS_NOINTERFACE, and a null aInstancePtr is
// rejected in release builds instead of only asserting.
NS_IMETHODIMP
nsNetDiskCache::QueryInterface(const nsIID& aIID, void** aInstancePtr)
{
  NS_ASSERTION(aInstancePtr, "no instance pointer");
  if (!aInstancePtr)
    return NS_ERROR_NULL_POINTER;
  if (aIID.Equals(NS_GET_IID(nsINetDataDiskCache)) ||
     aIID.Equals(NS_GET_IID(nsINetDataCache)) ||
     aIID.Equals(NS_GET_IID(nsISupports))) {
    // Cast through the most-derived interface so the vtable is right.
    *aInstancePtr = NS_STATIC_CAST(nsINetDataDiskCache*, this);
    NS_ADDREF_THIS();
    return NS_OK;
  }
  *aInstancePtr = nsnull;
  return NS_NOINTERFACE ;
}
// Boilerplate AddRef/Release via the standard XPCOM macros.
NS_IMPL_ADDREF(nsNetDiskCache) ;
NS_IMPL_RELEASE(nsNetDiskCache) ;
///////////////////////////////////////////////////////////////////////////
// nsINetDataCache Method
NS_IMETHODIMP
nsNetDiskCache::GetDescription(PRUnichar* *aDescription)
{
  // Hand back a freshly allocated UCS-2 copy of the human-readable name;
  // the caller owns the returned buffer.
  nsAutoString name("Disk Cache") ;
  *aDescription = name.ToNewUnicode() ;
  return *aDescription ? NS_OK : NS_ERROR_OUT_OF_MEMORY ;
}
/* don't alloc mem for nsICachedNetData.
* RecordID is generated using the same scheme in nsCacheDiskData,
* see GetCachedNetData() for detail.
*/
// Reports whether a record for |key| already exists in the database.
// No nsICachedNetData is allocated; this only probes the db.  The
// record-ID is derived with the same scheme as GetCachedNetData().
// Fixes the FUR note: a GetID() failure is now detected (and treated as
// "not cached") instead of probing the db with a stale id of 0.
NS_IMETHODIMP
nsNetDiskCache::Contains(const char* key, PRUint32 length, PRBool *_retval)
{
  if (!_retval)
    return NS_ERROR_NULL_POINTER ;
  *_retval = PR_FALSE ;
  NS_ASSERTION(m_DB, "no db.") ;

  PRInt32 id = 0 ;
  nsresult rv = m_DB->GetID(key, length, &id) ;
  if (NS_FAILED(rv))
    return NS_OK ;          // can't map the key, so it can't be cached

  void* info = 0 ;
  PRUint32 info_size = 0 ;
  rv = m_DB->Get(id, &info, &info_size) ;
  if (NS_SUCCEEDED(rv) && info)
    *_retval = PR_TRUE ;
  return NS_OK ;
}
/* regardless if it's cached or not, a copy of nsNetDiskCache would
* always be returned. so release it appropriately.
* if mem alloced, updata m_NumEntries also.
* for now, the new nsCachedNetData is not written into db yet since
* we have nothing to write.
*/
// Returns a record object for |key|, creating a fresh (empty) one when
// the key is not yet in the db — so the caller always receives an
// addref'ed record on success and must release it.  m_NumEntries only
// grows for genuinely new records; nothing is written to the db here
// because there is nothing to write yet.
//
// Fixes the FUR note: the original tested |rv| instead of the
// RetrieveInfo() result, so a record whose stored meta-info failed to
// parse was returned as success (and never released on the error path).
NS_IMETHODIMP
nsNetDiskCache::GetCachedNetData(const char* key, PRUint32 length, nsINetDataCacheRecord **_retval)
{
  NS_ASSERTION(m_DB, "no db.") ;
  if (!_retval)
    return NS_ERROR_NULL_POINTER ;
  *_retval = nsnull ;

  PRInt32 id = 0 ;
  nsresult rv = m_DB->GetID(key, length, &id) ;
  if (NS_FAILED(rv))
    return rv ;                        // was unchecked (FUR note)

  // construct an empty record
  nsDiskCacheRecord* newRecord = new nsDiskCacheRecord(m_DB, this) ;
  if (!newRecord)
    return NS_ERROR_OUT_OF_MEMORY ;
  rv = newRecord->Init(key, length) ;
  if (NS_FAILED(rv)) {
    delete newRecord ;
    return rv ;
  }
  NS_ADDREF(newRecord) ;               // reference handed out via *_retval
  *_retval = (nsINetDataCacheRecord*) newRecord ;

  void* info = 0 ;
  PRUint32 info_size = 0 ;
  rv = m_DB->Get(id, &info, &info_size) ;
  if (NS_SUCCEEDED(rv) && info) {
    // Existing record: populate it from the stored meta-info.
    nsresult r1 = newRecord->RetrieveInfo(info, info_size) ;
    if (NS_FAILED(r1)) {
      NS_RELEASE(*_retval) ;
      *_retval = nsnull ;
      return r1 ;
    }
    return NS_OK ;
  }
  else if (NS_SUCCEEDED(rv) && !info) {
    // Brand-new record; only now does the entry count grow.
    m_NumEntries ++ ;
    return NS_OK ;
  }
  // db lookup failed outright — don't hand out the record with an error.
  NS_RELEASE(*_retval) ;
  *_retval = nsnull ;
  return rv ;
}
/* get an nsICachedNetData, mem needs to be de-alloced if not found. */
NS_IMETHODIMP
nsNetDiskCache::GetCachedNetDataByID(PRInt32 RecordID, nsINetDataCacheRecord **_retval)
{
  // Look up an existing record purely by its numeric ID.  Unlike
  // GetCachedNetData(), this never creates a record: an ID that is not
  // in the db is an error.  On success *_retval holds an addref'ed
  // record the caller must release.
  NS_ASSERTION(m_DB, "no db.") ;
  if (!_retval)
    return NS_ERROR_NULL_POINTER ;
  *_retval = nsnull ;
  nsresult rv ;
  void* info = 0 ;
  PRUint32 info_size = 0 ;
  rv = m_DB->Get(RecordID, &info, &info_size) ;
  if(NS_SUCCEEDED(rv) && info) {
    // construct an empty record if only found in db
    nsDiskCacheRecord* newRecord = new nsDiskCacheRecord(m_DB, this) ;
    if(!newRecord)
      return NS_ERROR_OUT_OF_MEMORY ;
    NS_ADDREF(newRecord) ; // addref for _retval
    // Populate the record from its stored meta-info blob.
    rv = newRecord->RetrieveInfo(info, info_size) ;
    if(NS_SUCCEEDED(rv)) {
      *_retval = (nsINetDataCacheRecord*) newRecord ;
      return NS_OK ;
    }
    else {
      // bad record, I guess
      NS_RELEASE(newRecord) ; // release if bad things happen
      return rv ;
    }
  } else {
    // NOTE(review): rv may be NS_OK here (Get succeeded but info was
    // null), in which case a success code is returned despite the
    // error — TODO confirm nsDBAccessor::Get's contract.
    NS_ERROR("Error: RecordID not in DB\n") ;
    return rv ;
  }
}
NS_IMETHODIMP
nsNetDiskCache::GetEnabled(PRBool *aEnabled)
{
  // Report the current enabled/disabled flag.
  *aEnabled = m_Enabled ;
  return NS_OK ;
}
NS_IMETHODIMP
nsNetDiskCache::SetEnabled(PRBool aEnabled)
{
  // Toggle the cache on or off.
  m_Enabled = aEnabled ;
  return NS_OK ;
}
NS_IMETHODIMP
nsNetDiskCache::GetFlags(PRUint32 *aFlags)
{
  // This cache stores each URL's content in its own file.
  *aFlags = FILE_PER_URL_CACHE;
  return NS_OK ;
}
NS_IMETHODIMP
nsNetDiskCache::GetNumEntries(PRUint32 *aNumEntries)
{
  // Cached count, maintained by UpdateInfo() and GetCachedNetData().
  *aNumEntries = m_NumEntries ;
  return NS_OK ;
}
NS_IMETHODIMP
nsNetDiskCache::GetMaxEntries(PRUint32 *aMaxEntries)
{
  // Currently unbounded: the ctor sets this to (PRUint32)-1.
  *aMaxEntries = m_MaxEntries ;
  return NS_OK ;
}
NS_IMETHODIMP
nsNetDiskCache::NewCacheEntryIterator(nsISimpleEnumerator **_retval)
{
  if (!_retval)
    return NS_ERROR_NULL_POINTER ;
  *_retval = nsnull ;

  // Hand the caller an addref'ed enumerator over the records in the db.
  nsDBEnumerator* iter = new nsDBEnumerator(m_DB, this) ;
  if (!iter)
    return NS_ERROR_OUT_OF_MEMORY ;
  NS_ADDREF(iter) ;
  *_retval = iter ;
  return NS_OK ;
}
// Returns the next cache in the lookup chain (may be null).
// Fix: an XPCOM getter must hand out an owning reference.  The original
// copied the raw pointer out of the nsCOMPtr without addref'ing it, so
// the caller's Release() would unbalance our refcount (compare
// GetDiskCacheFolder(), which does addref).
NS_IMETHODIMP
nsNetDiskCache::GetNextCache(nsINetDataCache * *aNextCache)
{
  if(!aNextCache)
    return NS_ERROR_NULL_POINTER ;
  *aNextCache = m_pNextCache ;
  NS_IF_ADDREF(*aNextCache) ;
  return NS_OK ;
}
NS_IMETHODIMP
nsNetDiskCache::SetNextCache(nsINetDataCache *aNextCache)
{
  // nsCOMPtr assignment takes its own reference to the next cache.
  m_pNextCache = aNextCache ;
  return NS_OK ;
}
// db size can always be measured at the last minute. Since it's hard
// to know before hand.
// Report the cached content footprint, in kilobytes.
// The db file's own size is deliberately NOT stat'ed here: per the FUR
// note this getter can be called hundreds of times per second (once per
// buffer written to the cache), so it must stay cheap.  If the db size
// is ever added back, cache it and invalidate on record writes:
//   m_DBFile->GetFileSize(&len) ;
//   total_size += len ;
// Also removes the unused local |len| the original left behind.
NS_IMETHODIMP
nsNetDiskCache::GetStorageInUse(PRUint32 *aStorageInUse)
{
  PRUint32 total_size = m_StorageInUse ;
  // we need size in kB
  *aStorageInUse = total_size >> 10 ;
  return NS_OK ;
}
/*
* The whole cache dirs can be whiped clean since all the cache
* files are resides in seperate hashed dirs. It's safe to do so.
*/
NS_IMETHODIMP
nsNetDiskCache::RemoveAll(void)
{
  // Wipe the entire cache: delete every hashed sub-directory, then the
  // db file itself, then rebuild an empty structure via InitDB() and
  // recompute the (now zero) statistics.
  nsresult rv = RemoveDirs(0) ;
  if(NS_FAILED(rv))
    return rv ;
  // don't forget the db file itself
  // NOTE(review): Shutdown()'s result is ignored here (DBRecovery()
  // treats the same situation as best-effort too) — TODO confirm.
  m_DB->Shutdown() ;
  nsFileSpec dbfile ;
  m_DBFile->GetFileSpec(&dbfile) ;
  dbfile.Delete(PR_TRUE) ;
  // reinitialize an empty db and refresh m_NumEntries/m_StorageInUse
  rv = InitDB() ;
  if(NS_FAILED(rv))
    return rv ;
  rv = UpdateInfo() ;
  return rv ;
}
//////////////////////////////////////////////////////////////////
// nsINetDataDiskCache methods
NS_IMETHODIMP
nsNetDiskCache::GetDiskCacheFolder(nsIFileSpec * *aDiskCacheFolder)
{
  // Caller receives an owning reference to the cache folder spec.
  *aDiskCacheFolder = m_pDiskCacheFolder ;
  NS_ADDREF(*aDiskCacheFolder) ;
  return NS_OK ;
}
// Adopt a new cache folder and rebuild the cache structure beneath it.
// Fix: the original had the path comparison inverted — it wiped the
// cache when the *same* folder was re-set and silently ignored a
// genuinely new folder (the FUR comments mark both branches as wrong).
NS_IMETHODIMP
nsNetDiskCache::SetDiskCacheFolder(nsIFileSpec * aDiskCacheFolder)
{
  char *newfolder, *oldfolder ;
  m_pDiskCacheFolder->GetNativePath(&oldfolder) ;
  aDiskCacheFolder->GetNativePath(&newfolder) ;
  PRBool sameFolder = (PL_strcmp(newfolder, oldfolder) == 0) ;
  // NOTE(review): GetNativePath() presumably allocates these strings;
  // they leaked in the original too — TODO confirm ownership and free.
  if (sameFolder)
    return NS_OK ;            // nothing to do

  m_pDiskCacheFolder = aDiskCacheFolder ;
  // Rebuild sub-dirs + db under the new folder.  FUR: the old folder's
  // contents are not cleaned up yet ("blow away old cache" still TODO).
  return RemoveAll() ;
}
//////////////////////////////////////////////////////////////////
// nsNetDiskCache methods
// create a directory (recursively)
// Create dir_spec on disk, creating missing ancestors first (recursive).
// A directory that already exists is a successful no-op.
// Fixes the three FUR notes: GetParent() and both CreateDir() calls are
// now checked instead of being ignored.
NS_IMETHODIMP
nsNetDiskCache::CreateDir(nsIFileSpec* dir_spec)
{
  if (!dir_spec)
    return NS_ERROR_NULL_POINTER ;

  PRBool does_exist = PR_FALSE ;
  dir_spec->Exists(&does_exist) ;
  if (does_exist)
    return NS_OK ;

  nsCOMPtr<nsIFileSpec> p_spec ;
  nsresult rv = dir_spec->GetParent(getter_AddRefs(p_spec)) ;
  if (NS_FAILED(rv) || !p_spec)
    return NS_ERROR_FAILURE ;

  PRBool parent_exists = PR_FALSE ;
  p_spec->Exists(&parent_exists) ;
  if (!parent_exists) {
    // Build the ancestor chain first.
    rv = CreateDir(p_spec) ;
    if (NS_FAILED(rv))
      return rv ;
  }
  // Both branches of the original ended in the same call; merged here.
  rv = dir_spec->CreateDir() ;
  return NS_FAILED(rv) ? NS_ERROR_FAILURE : NS_OK ;
}
// FUR!!
// We can't afford to make a *separate* pass over the whole db on every
// startup, just to figure out m_NumEntries and m_StorageInUse. (This is a
// several second operation on a large db). We'll likely need to store
// distinguished keys in the db that contain these values and update them
// incrementally, except when failure to shut down the db cleanly is detected.
// this will walk through db and update m_NumEntries and m_StorageInUse
NS_IMETHODIMP
nsNetDiskCache::UpdateInfo(void)
{
  // Recompute m_NumEntries and m_StorageInUse by walking every record
  // in the db.  (Per the FUR note above this is an O(db) scan and is
  // too expensive to run on every startup long-term.)
  // count num of entries in db
  // NS_ADDREF(this) ; // addref before assign to a nsCOMPtr.
  nsISimpleEnumerator* dbEnumerator = new nsDBEnumerator(m_DB, this) ;
  if(dbEnumerator)
    NS_ADDREF(dbEnumerator) ;
  else
    return NS_ERROR_FAILURE ;
  PRUint32 numEntries = 0, storageInUse = 0, len = 0 ;
  PRBool more = PR_FALSE ;
  do {
    dbEnumerator->HasMoreElements(&more) ;
    if(more) {
      // update entry number
      numEntries++ ;
      // update storage in use
      // NOTE(review): GetNext()'s result is unchecked; if it fails,
      // |record| is used uninitialized — TODO confirm and harden.
      nsINetDataCacheRecord* record ;
      dbEnumerator->GetNext((nsISupports**)&record) ;
      record->GetStoredContentLength(&len) ;
      storageInUse += len ;
      NS_IF_RELEASE(record) ;
    }
  } while (more) ;
  NS_IF_RELEASE(dbEnumerator) ;
  m_NumEntries = numEntries ;
  m_StorageInUse = storageInUse ;
  printf(" m_NumEntries = %d, size is %d.\n", m_NumEntries, m_StorageInUse) ;
  return NS_OK ;
}
// this routine will add m_BaseDirNum to current CacheSubDir names.
// e.g. 00->20, 1f->5f. and update the m_BaseDirNum to another 32.
// the idea is as long as we remember the base number,
// we know how many dirs needs to be removed during shutdown period
// it will be from 0x20 to m_BaseDirNum.
// also, we assume that this operation will not be performed 3 times more
// within a single session. it is part of scavenging routine.
NS_IMETHODIMP
nsNetDiskCache::RenameCacheSubDirs(void)
{
  // Set the 32 live sub-dirs aside for later deletion by renaming each
  // "xx" to "xx + m_BaseDirNum" (e.g. 00->20, 1f->3f on the first run),
  // then bump m_BaseDirNum by 32.  See the comment block above for the
  // scavenging scheme and its at-most-3-runs-per-session assumption.
  nsCOMPtr<nsIFileSpec> cacheSubDir;
  nsresult rv = NS_NewFileSpec(getter_AddRefs(cacheSubDir)) ;
  for (int i=0; i < 32; i++) {
    rv = cacheSubDir->FromFileSpec(m_pDiskCacheFolder) ;
    if(NS_FAILED(rv))
      return rv ;
    // Two hex digits + NUL; "%0.2x" stays within 2 chars for values < 256,
    // which holds given the 3-runs assumption (max 0x7f).
    char dirName[3];
    PR_snprintf(dirName, 3, "%0.2x", i) ;
    cacheSubDir->AppendRelativeUnixPath(dirName) ;
    // re-name the directory
    PR_snprintf(dirName, 3, "%0.2x", i+m_BaseDirNum) ;
    rv = cacheSubDir->Rename(dirName) ;
    if(NS_FAILED(rv))
      return NS_ERROR_FAILURE ;
  }
  // update m_BaseDirNum
  m_BaseDirNum += 32 ;
  return NS_OK ;
}
// this routine will be called everytime we have a db corruption.
// Called whenever db corruption is detected: move the live cache dirs
// aside (they get deleted on shutdown), remove the corrupt db file, and
// rebuild an empty db + statistics from scratch.
// Fixes the FUR note: a failed Shutdown() no longer aborts recovery,
// since returning early would prevent the corrupt file's deletion.
NS_IMETHODIMP
nsNetDiskCache::DBRecovery(void)
{
  nsresult rv = RenameCacheSubDirs() ;
  if (NS_FAILED(rv))
    return rv ;

  // Best-effort close of the corrupt db; proceed to delete regardless.
  rv = m_DB->Shutdown() ;
  NS_ASSERTION(NS_SUCCEEDED(rv), "db shutdown failed; deleting file anyway") ;

  nsFileSpec dbfile ;
  m_DBFile->GetFileSpec(&dbfile) ;
  dbfile.Delete(PR_TRUE) ;
  // make sure it's really gone before rebuilding on top of it
  if (dbfile.Exists()) {
    NS_ERROR("can't remove old db.") ;
    return NS_ERROR_FAILURE ;
  }

  // reinitialize an empty db and refresh the cached statistics
  rv = InitDB() ;
  if (NS_FAILED(rv))
    return rv ;
  return UpdateInfo() ;
}
// this routine is used by dtor and RemoveAll() to clean up dirs.
// All directory named from aNum - m_BasedDirNum will be deleted.
NS_IMETHODIMP
nsNetDiskCache::RemoveDirs(PRUint32 aNum)
{
  // Delete every hashed sub-directory numbered aNum .. m_BaseDirNum-1
  // (used by the dtor with aNum=32 to scrap renamed dirs, and by
  // RemoveAll() with aNum=0 to scrap the live ones too).
  nsCOMPtr<nsIFileSpec> cacheSubDir;
  nsresult rv = NS_NewFileSpec(getter_AddRefs(cacheSubDir));
  if(NS_FAILED(rv))
    return NS_ERROR_FAILURE ;
  // NOTE(review): |int i| is compared against PRUint32 aNum and PRInt32
  // m_BaseDirNum — mixed-signedness, harmless for the small values used
  // here but worth tidying.
  for (int i=aNum; i < m_BaseDirNum; i++) {
    cacheSubDir->FromFileSpec(m_pDiskCacheFolder) ;
    char dirName[3];
    PR_snprintf (dirName, 3, "%0.2x", i);
    cacheSubDir->AppendRelativeUnixPath (dirName) ;
    nsFileSpec subdir ;
    cacheSubDir->GetFileSpec(&subdir) ;
    // Delete every file inside first...
    for(nsDirectoryIterator di(subdir, PR_FALSE); di.Exists(); di++) {
      di.Spec().Delete(PR_TRUE) ;
    }
    subdir.Delete(PR_FALSE) ; // ...then the (now empty) dir, non-recursively
  }
  return NS_OK ;
}

View File

@@ -0,0 +1,82 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
// nsNetDiskCache: disk-based implementation of nsINetDataDiskCache.
// Cache content lives as one file per record, spread over 32 hashed
// sub-directories ("00".."1f") under m_pDiskCacheFolder; record
// meta-info is kept in a db file (cache.db) accessed via nsIDBAccessor.
#ifndef __gen_nsNetDiskCache_h__
#define __gen_nsNetDiskCache_h__
#include "nsINetDataDiskCache.h"
#include "nsNetDiskCacheCID.h"
#include "nsCOMPtr.h"
#include "nsIPref.h"
#include "nsDBAccessor.h"
class nsIURI; /* forward decl */
class nsICachedNetData; /* forward decl */
class nsISimpleEnumerator; /* forward decl */
class nsIFileSpec; /* forward decl */
/* starting interface: nsNetDiskCache */
class nsNetDiskCache : public nsINetDataDiskCache {
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSINETDATACACHE
  NS_DECL_NSINETDATADISKCACHE
  // Second-phase construction: folder selection, db creation, stats.
  NS_IMETHOD Init(void) ;
  nsNetDiskCache() ;
  virtual ~nsNetDiskCache() ;
protected:
  // Create the hashed sub-dirs and open the db file.
  NS_IMETHOD InitDB(void) ;
  // Create a directory (and missing ancestors) on disk.
  NS_IMETHOD CreateDir(nsIFileSpec* dir_spec) ;
  // Recompute m_NumEntries/m_StorageInUse by scanning the db.
  NS_IMETHOD UpdateInfo(void) ;
  // Set the live sub-dirs aside (rename by +m_BaseDirNum) for scavenging.
  NS_IMETHOD RenameCacheSubDirs(void) ;
  // Full recovery path after db corruption is detected.
  NS_IMETHOD DBRecovery(void) ;
  // Delete sub-dirs numbered aNum .. m_BaseDirNum-1.
  NS_IMETHOD RemoveDirs(PRUint32 aNum) ;
private:
  PRBool m_Enabled ;                          // cache on/off switch
  PRUint32 m_NumEntries ;                     // cached record count
  nsCOMPtr<nsINetDataCache> m_pNextCache ;    // next cache in lookup chain
  nsCOMPtr<nsIFileSpec> m_pDiskCacheFolder ;  // root cache directory
  nsCOMPtr<nsIFileSpec> m_DBFile ;            // <folder>/cache.db
  PRUint32 m_MaxEntries ;                     // currently (PRUint32)-1, i.e. unbounded
  PRUint32 m_StorageInUse ;                   // content bytes (db file excluded)
  nsIDBAccessor* m_DB ;                       // manually refcounted db accessor
  // this is used to indicate a db corruption: starts at 32, bumped by 32
  // on every RenameCacheSubDirs() run; > 32 means recovery happened.
  PRInt32 m_BaseDirNum ;
  friend class nsDiskCacheRecord ;
  friend class nsDiskCacheRecordChannel ;
} ;
#endif /* __gen_nsNetDiskCache_h__ */

View File

@@ -0,0 +1,32 @@
/*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator.
*
* The Initial Developer of the Original Code is Intel Corp.
* Portions created by Intel Corp. are
* Copyright (C) 1999, 1999 Intel Corp. All
* Rights Reserved.
*
* Contributor(s): Yixiong Zou <yixiong.zou@intel.com>
* Carl Wong <carl.wong@intel.com>
*/
#ifndef _nsNetDiskCacheCID_h_
#define _nsNetDiskCacheCID_h_
// Class ID for the nsNetDiskCache component, as a string and as the
// equivalent nsCID initializer {ECFEEA00-7201-11d3-87FE-000629D01344}.
#define NS_NETDISKCACHE_CID_STR "ECFEEA00-7201-11d3-87FE-000629D01344"
#define NS_NETDISKCACHE_CID \
{ 0xecfeea00, 0x7201, 0x11d3, \
  { 0x87, 0xfe, 0x0, 0x6, 0x29, 0xd0, 0x13, 0x44 }}
#endif /* _nsNetDiskCacheCID_h_ */

View File

@@ -0,0 +1,50 @@
#
# The contents of this file are subject to the Netscape Public License
# Version 1.0 (the "NPL"); you may not use this file except in
# compliance with the NPL. You may obtain a copy of the NPL at
# http://www.mozilla.org/NPL/
#
# Software distributed under the NPL is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
# for the specific language governing rights and limitations under the
# NPL.
#
# The Initial Developer of this code under the NPL is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All Rights
# Reserved.
#
# Build the standalone disk-cache test program (diskcache.cpp).
DEPTH = ../../../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = @srcdir@
include $(DEPTH)/config/autoconf.mk
REQUIRES = libreg xpcom
CPPSRCS = \
diskcache.cpp \
$(NULL)
# One executable per source file.
SIMPLE_PROGRAMS = $(CPPSRCS:.cpp=)
# Platforms that can't fold static archives into the link need these
# symbol-providing libs listed explicitly.
ifdef NO_LD_ARCHIVE_FLAGS
LOST_SYM_LIBS = -lxpcomds_s -lxptinfo -lmozreg_s
endif
LIBS = \
-lmozjs \
-lxpcom \
-lmozdbm_s \
$(MOZ_NECKO_UTIL_LIBS) \
$(LOST_SYM_LIBS) \
$(NSPR_LIBS) \
$(NULL)
include $(topsrcdir)/config/rules.mk
# Headers live in the parent (cache implementation) directory.
LOCAL_INCLUDES = -I$(srcdir)/..
DEFINES += -DUSE_NSREG -DCACHE

View File

@@ -0,0 +1,836 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
#include "nsIStreamListener.h"
#include "nsIStreamObserver.h"
#include "nsIServiceManager.h"
#include "nsIInputStream.h"
#include "nsIOutputStream.h"
#include "nsIEventQueue.h"
#include "nsIEventQueueService.h"
#include "nsIChannel.h"
#include "nsCOMPtr.h"
#include "nsString.h"
#include <stdio.h>
#include <unistd.h>
#include "nsINetDataCache.h"
#include "nsINetDataCacheRecord.h"
//#include "nsMemCacheCID.h"
#include "nsNetDiskCache.h"
#include "nsIPref.h"
#include "prenv.h"
#include "nsIFileStream.h"
// Number of test entries to be placed in the cache
#define NUM_CACHE_ENTRIES 250
// Cache content stream length will have random length between zero and
// MAX_CONTENT_LENGTH bytes
#define MAX_CONTENT_LENGTH 20000
// Length of random-data cache entry key
#define CACHE_KEY_LENGTH 15
// Length of random-data cache entry meta-data
#define CACHE_METADATA_LENGTH 100
//static NS_DEFINE_CID(kMemCacheCID, NS_MEM_CACHE_FACTORY_CID);
static NS_DEFINE_CID(kEventQueueServiceCID, NS_EVENTQUEUESERVICE_CID);
static NS_DEFINE_CID(kDiskCacheCID, NS_NETDISKCACHE_CID) ;
static NS_DEFINE_CID(kPrefCID, NS_PREF_CID);
static NS_DEFINE_IID(kIPrefIID, NS_IPREF_IID);
// Mapping from test case number to RecordID
static PRInt32 recordID[NUM_CACHE_ENTRIES];
// Linear scan of the recordID table: returns the test-case index whose
// stored record ID equals aRecordID, or -1 when the ID is unknown.
static PRInt32
mapRecordIdToTestNum(PRInt32 aRecordID)
{
    for (int idx = 0; idx < NUM_CACHE_ENTRIES; idx++) {
        if (recordID[idx] == aRecordID)
            return idx;
    }
    return -1;
}
// A supply of stream data to either store or compare with
class nsITestDataStream {
public:
  virtual ~nsITestDataStream() {};
  // Produce the next value in the deterministic sequence.
  virtual PRUint32 Next() = 0;
  // Fill aBuf with the next aCount bytes of the sequence.
  virtual void Read(char* aBuf, PRUint32 aCount) = 0;
  // Compare aCount bytes of aBuf against the sequence; PR_TRUE on match.
  virtual PRBool Match(char* aBuf, PRUint32 aCount) = 0;
  // Advance the sequence by aCount bytes without producing output.
  virtual void Skip(PRUint32 aCount) = 0;
};
// A reproducible stream of random data.
class RandomStream : public nsITestDataStream {
public:
RandomStream(PRUint32 aSeed) {
mStartSeed = mState = aSeed;
}
PRUint32 GetStartSeed() {
return mStartSeed;
}
PRUint32 Next() {
mState = 1103515245 * mState + 12345;
return mState;
}
void Read(char* aBuf, PRUint32 aCount) {
PRUint32 i;
for (i = 0; i < aCount; i++) {
*aBuf++ = Next();
}
}
PRBool
Match(char* aBuf, PRUint32 aCount) {
PRUint32 i;
for (i = 0; i < aCount; i++) {
if (*aBuf++ != (char)(Next() & 0xff))
return PR_FALSE;
}
return PR_TRUE;
}
void
Skip(PRUint32 aCount) {
while (aCount--)
Next();
}
protected:
PRUint32 mState;
PRUint32 mStartSeed;
};
// A stream of data that increments on each byte that is read, modulo 256
class CounterStream : public nsITestDataStream {
public:
CounterStream(PRUint32 aSeed) {
mStartSeed = mState = aSeed;
}
PRUint32 GetStartSeed() {
return mStartSeed;
}
PRUint32 Next() {
mState += 1;
mState &= 0xff;
return mState;
}
void Read(char* aBuf, PRUint32 aCount) {
PRUint32 i;
for (i = 0; i < aCount; i++) {
*aBuf++ = Next();
}
}
PRBool
Match(char* aBuf, PRUint32 aCount) {
PRUint32 i;
for (i = 0; i < aCount; i++) {
if (*aBuf++ != (char)Next())
return PR_FALSE;
}
return PR_TRUE;
}
void
Skip(PRUint32 aCount) {
mState += aCount;
mState &= 0xff;
}
protected:
PRUint32 mState;
PRUint32 mStartSeed;
};
static int gNumReaders = 0;
static PRUint32 gTotalBytesRead = 0;
static PRUint32 gTotalDuration = 0;
// Stream listener used by the test: consumes a cache channel's data,
// verifies every byte against an expected nsITestDataStream, and
// accumulates global throughput statistics.  gNumReaders tracks live
// instances so the main loop knows when all reads have completed.
class nsReader : public nsIStreamListener {
public:
  NS_DECL_ISUPPORTS

  nsReader()
      : mStartTime(0), mBytesRead(0)
  {
    NS_INIT_REFCNT();
    gNumReaders++;
  }

  virtual ~nsReader() {
    // Takes ownership of the test stream handed to Init().
    delete mTestDataStream;
    gNumReaders--;
  }

  // Bind the reader to a channel and the expected-data generator.
  // NOTE(review): forces mRefCnt to 1 rather than using AddRef — the
  // callers pair this with an explicit Release(); confirm before reuse.
  nsresult
  Init(nsIChannel *aChannel, nsITestDataStream* aRandomStream, PRUint32 aExpectedStreamLength) {
    mChannel = aChannel;
    mTestDataStream = aRandomStream;
    mExpectedStreamLength = aExpectedStreamLength;
    mRefCnt = 1;
    return NS_OK;
  }

  // Start of the async read: remember when, for throughput stats.
  NS_IMETHOD OnStartRequest(nsIChannel* channel,
                            nsISupports* context) {
    mStartTime = PR_IntervalNow();
    return NS_OK;
  }

  // Drain the input stream in 1 KB+1 chunks, comparing each chunk
  // against the expected data sequence.
  NS_IMETHOD OnDataAvailable(nsIChannel* channel,
                             nsISupports* context,
                             nsIInputStream *aIStream,
                             PRUint32 aSourceOffset,
                             PRUint32 aLength) {
    char buf[1025];
    while (aLength > 0) {
      PRUint32 amt;
      PRBool match;
      aIStream->Read(buf, sizeof buf, &amt);
      if (amt == 0) break;
      aLength -= amt;
      mBytesRead += amt;
      match = mTestDataStream->Match(buf, amt);
      NS_ASSERTION(match, "Stored data was corrupted on read");
    }
    return NS_OK;
  }

  // End of the read: verify total length, fold duration/bytes into the
  // global counters, and drop the channel reference.
  NS_IMETHOD OnStopRequest(nsIChannel* channel,
                           nsISupports* context,
                           nsresult aStatus,
                           const PRUnichar* aMsg) {
    PRIntervalTime endTime;
    PRIntervalTime duration;
    endTime = PR_IntervalNow();
    duration = (endTime - mStartTime);
    if (NS_FAILED(aStatus)) printf("channel failed.\n");
    // printf("read %d bytes\n", mBytesRead);
    NS_ASSERTION(mBytesRead == mExpectedStreamLength,
                 "Stream in cache is wrong length");
    gTotalBytesRead += mBytesRead;
    gTotalDuration += duration;
    // Release channel
    mChannel = 0;
    return NS_OK;
  }

protected:
  PRIntervalTime mStartTime;            // set in OnStartRequest
  PRUint32 mBytesRead;                  // running byte count
  nsITestDataStream* mTestDataStream;   // expected data; owned (deleted in dtor)
  PRUint32 mExpectedStreamLength;       // asserted in OnStopRequest
  nsCOMPtr<nsIChannel> mChannel;        // held until OnStopRequest
};
NS_IMPL_ISUPPORTS2(nsReader, nsIStreamListener, nsIStreamObserver)
static nsIEventQueue* eventQueue;
nsresult
InitQueue() {
  // Create the main thread's event queue and stash it in the global
  // |eventQueue| for WaitForEvents().  Failures only assert (debug
  // builds); NS_OK is returned regardless.
  nsresult rv;
  NS_WITH_SERVICE(nsIEventQueueService, eventQService, kEventQueueServiceCID, &rv);
  NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get event queue service");
  rv = eventQService->CreateThreadEventQueue();
  NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't create event queue");
  rv = eventQService->GetThreadEventQueue(PR_CurrentThread(), &eventQueue);
  NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get event queue for main thread");
  return NS_OK;
}
// Process events until all streams are OnStopRequest'ed
nsresult
WaitForEvents() {
    // Pump the main-thread event queue until every reader has been
    // destroyed (~nsReader decrements gNumReaders).
    while (gNumReaders)
        eventQueue->ProcessPendingEvents();
    return NS_OK;
}
// Read data for a single cache record asynchronously and compare it against
// testDataStream.  Also cross-checks the record's stored content length.
// The nsReader keeps itself (and the channel) alive for the duration of the
// async read; completion is observed via WaitForEvents() by the caller.
nsresult
TestReadStream(nsINetDataCacheRecord *record, nsITestDataStream *testDataStream,
PRUint32 expectedStreamLength)
{
nsCOMPtr<nsIChannel> channel;
nsresult rv;
PRUint32 actualContentLength;
rv = record->NewChannel(0, getter_AddRefs(channel));
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
rv = record->GetStoredContentLength(&actualContentLength);
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
NS_ASSERTION(actualContentLength == expectedStreamLength,
"nsINetDataCacheRecord::GetContentLength() busted ?");
// Reader starts with refcount 1 (set in Init); the extra AddRef/Release
// pair here guards it across Init and AsyncRead.
nsReader *reader = new nsReader;
reader->AddRef();
rv = reader->Init(channel, testDataStream, expectedStreamLength);
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
// Read entire stream (-1 length) starting at offset 0.
rv = channel->AsyncRead(0, -1, 0, reader);
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
reader->Release();
return NS_OK;
}
// Check that records can be retrieved using their record-ID, in addition
// to using the opaque key.  For each test entry, regenerates the
// deterministic random stream (seeded by the test number), fetches the
// record by the ID saved during FillCache(), and verifies its meta-data.
nsresult
TestRecordID(nsINetDataCache *cache)
{
nsresult rv;
nsCOMPtr<nsINetDataCacheRecord> record;
RandomStream *randomStream;
PRUint32 metaDataLength;
char cacheKey[CACHE_KEY_LENGTH];
char *metaData;
PRUint32 testNum;
PRBool match;
for (testNum = 0; testNum < NUM_CACHE_ENTRIES; testNum++) {
randomStream = new RandomStream(testNum);
// Advance past the key bytes so the stream lines up with the meta-data.
randomStream->Read(cacheKey, sizeof cacheKey);
rv = cache->GetCachedNetDataByID(recordID[testNum], getter_AddRefs(record));
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't obtain record using record ID");
// Match against previously stored meta-data
rv = record->GetMetaData(&metaDataLength, &metaData);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get record meta-data");
match = randomStream->Match(metaData, metaDataLength);
NS_ASSERTION(match, "Meta-data corrupted or incorrect");
nsAllocator::Free(metaData);
delete randomStream;
}
return NS_OK;
}
// Check that all cache entries in the database are enumerated and that
// no duplicates appear.  Each enumerated record's ID is mapped back to its
// test number; the mapping entry is then invalidated so a duplicate
// enumeration of the same record would trip the assertion.
nsresult
TestEnumeration(nsINetDataCache *cache)
{
nsresult rv;
nsCOMPtr<nsINetDataCacheRecord> record;
nsCOMPtr<nsISupports> tempISupports;
nsCOMPtr<nsISimpleEnumerator> iterator;
RandomStream *randomStream;
PRUint32 metaDataLength;
char cacheKey[CACHE_KEY_LENGTH];
char *metaData;
PRUint32 testNum;
PRBool match;
PRInt32 recID;
int numRecords = 0;
// Iterate over all records in the cache
rv = cache->NewCacheEntryIterator(getter_AddRefs(iterator));
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't create new cache entry iterator");
PRBool notDone;
while (1) {
// Done iterating ?
rv = iterator->HasMoreElements(&notDone);
if (NS_FAILED(rv)) return rv;
if (!notDone)
break;
// Get next record in iteration
rv = iterator->GetNext(getter_AddRefs(tempISupports));
NS_ASSERTION(NS_SUCCEEDED(rv), "iterator bustage");
record = do_QueryInterface(tempISupports);
numRecords++;
// Get record ID
rv = record->GetRecordID(&recID);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get Record ID");
testNum = mapRecordIdToTestNum(recID);
// testNum is unsigned; the -1 sentinel compares via unsigned wraparound.
NS_ASSERTION(testNum != -1, "Corrupted Record ID ?");
// Erase mapping from table, so that duplicate enumerations are detected
recordID[testNum] = -1;
// Make sure stream matches test data
randomStream = new RandomStream(testNum);
randomStream->Read(cacheKey, sizeof cacheKey);
// Match against previously stored meta-data
rv = record->GetMetaData(&metaDataLength, &metaData);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get record meta-data");
match = randomStream->Match(metaData, metaDataLength);
NS_ASSERTION(match, "Meta-data corrupted or incorrect");
nsAllocator::Free(metaData);
delete randomStream;
}
NS_ASSERTION(numRecords == NUM_CACHE_ENTRIES, "Iteration bug");
return NS_OK;
}
// Read back every record written by FillCache(), checking for corruption
// and truncation.  Exercises Contains(), GetCachedNetData(), GetMetaData(),
// GetKey() and the async read path (via TestReadStream), then reports the
// aggregate read throughput.
nsresult
TestRead(nsINetDataCache *cache)
{
    nsresult rv;
    PRBool inCache;
    nsCOMPtr<nsINetDataCacheRecord> record;
    RandomStream *randomStream;
    PRUint32 metaDataLength;
    char cacheKey[CACHE_KEY_LENGTH];
    char *metaData, *storedCacheKey;
    PRUint32 testNum, storedCacheKeyLength;
    PRBool match;
    for (testNum = 0; testNum < NUM_CACHE_ENTRIES; testNum++) {
        // Regenerate the deterministic key for this entry
        randomStream = new RandomStream(testNum);
        randomStream->Read(cacheKey, sizeof cacheKey);
        // Ensure that entry is in the cache
        rv = cache->Contains(cacheKey, sizeof cacheKey, &inCache);
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        NS_ASSERTION(inCache, "nsINetDataCache::Contains error");
        rv = cache->GetCachedNetData(cacheKey, sizeof cacheKey, getter_AddRefs(record));
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        // Match against previously stored meta-data.
        // BUG FIX: the original assigned GetMetaData()'s nsresult to
        // 'match' and then asserted on a stale 'rv'.
        rv = record->GetMetaData(&metaDataLength, &metaData);
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        match = randomStream->Match(metaData, metaDataLength);
        NS_ASSERTION(match, "Meta-data corrupted or incorrect");
        nsAllocator::Free(metaData);
        // Test GetKey() method
        rv = record->GetKey(&storedCacheKeyLength, &storedCacheKey);
        NS_ASSERTION(NS_SUCCEEDED(rv) &&
                     (storedCacheKeyLength == sizeof cacheKey) &&
                     !memcmp(storedCacheKey, &cacheKey[0], sizeof cacheKey),
                     "nsINetDataCacheRecord::GetKey failed");
        nsAllocator::Free(storedCacheKey);
        PRUint32 expectedStreamLength = randomStream->Next() & 0xffff;
        TestReadStream(record, randomStream, expectedStreamLength);
    }
    WaitForEvents();
    // Compute aggregate rate in MB/s.
    // BUG FIX: the original performed integer division (losing precision)
    // and divided by zero when the total duration rounded down to 0 ms.
    PRUint32 totalMs = PR_IntervalToMilliseconds(gTotalDuration);
    double rate = totalMs ? (double)gTotalBytesRead / totalMs : 0.0;
    // NOTE(review): multiplying by NUM_CACHE_ENTRIES looks odd; preserved
    // on the assumption gTotalDuration sums overlapping per-read
    // durations -- TODO confirm the intended formula.
    rate *= NUM_CACHE_ENTRIES;
    rate *= 1000;
    rate /= (1024 * 1024);
    printf("Read %d bytes at a rate of %5.1f MB per second \n",
           gTotalBytesRead, rate);
    return NS_OK;
}
// Repeatedly call SetStoredContentLength() on a cache entry, shrinking it a
// little each time, and re-read the stream after each truncation to ensure
// the surviving data is not corrupted.
nsresult
TestTruncation(nsINetDataCache *cache)
{
nsresult rv;
nsCOMPtr<nsINetDataCacheRecord> record;
RandomStream *randomStream;
char cacheKey[CACHE_KEY_LENGTH];
// Entry 0's key and original length are regenerated deterministically.
randomStream = new RandomStream(0);
randomStream->Read(cacheKey, sizeof cacheKey);
rv = cache->GetCachedNetData(cacheKey, sizeof cacheKey, getter_AddRefs(record));
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
randomStream->Skip(CACHE_METADATA_LENGTH);
PRUint32 initialStreamLength = randomStream->Next() & 0xffff;
delete randomStream;
PRUint32 i;
// Truncate in 64 roughly-equal steps.
PRUint32 delta = initialStreamLength / 64;
for (i = initialStreamLength; i >= delta; i -= delta) {
PRUint32 expectedStreamLength = i;
// Do the truncation
record->SetStoredContentLength(expectedStreamLength);
// Fresh stream positioned at the first content byte; the +1 skips the
// value consumed by Next() when the length was generated.
randomStream = new RandomStream(0);
randomStream->Skip(CACHE_KEY_LENGTH + CACHE_METADATA_LENGTH + 1);
TestReadStream(record, randomStream, expectedStreamLength);
WaitForEvents();
}
return NS_OK;
}
// Write known data to random offsets in a single cache entry and test
// resulting stream for correctness.  Each iteration opens an output stream
// at an offset slightly before the current end-of-stream, so writes
// partially overlap previously written data; the final content is the
// CounterStream pattern (byte = offset % 0xff) for the whole length.
nsresult
TestOffsetWrites(nsINetDataCache *cache)
{
nsresult rv;
nsCOMPtr<nsINetDataCacheRecord> record;
nsCOMPtr<nsIChannel> channel;
nsCOMPtr<nsIOutputStream> outStream;
char buf[512];
char cacheKey[CACHE_KEY_LENGTH];
RandomStream *randomStream;
randomStream = new RandomStream(0);
randomStream->Read(cacheKey, sizeof cacheKey);
rv = cache->GetCachedNetData(cacheKey, sizeof cacheKey, getter_AddRefs(record));
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't access record via opaque cache key");
// NOTE(review): 'name' below is never freed and the truncate() call
// bypasses the cache API -- debugging scaffolding, apparently.
nsCOMPtr<nsIFileSpec> file ;
record->GetFilename(getter_AddRefs(file)) ;
char* name ;
file->GetUnixStyleFilePath(&name) ;
printf(" file name is %s \n", name) ;
// Write buffer-fulls of data at random offsets into the cache entry.
// Data written is (offset % 0xff)
PRUint32 startingOffset;
PRUint32 streamLength = 0;
PRUint32 len = 0 ;
CounterStream *counterStream;
int i = 0;
for (i = 0; i < 257; i++) {
rv = record->NewChannel(0, getter_AddRefs(channel));
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
// Start each write up to sizeof buf bytes before the current end.
startingOffset = streamLength ? streamLength - (randomStream->Next() % sizeof buf): 0;
rv = channel->OpenOutputStream(startingOffset, getter_AddRefs(outStream));
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
truncate(name, startingOffset) ;
counterStream = new CounterStream(startingOffset);
counterStream->Read(buf, sizeof buf);
nsresult status ;
nsCOMPtr<nsIRandomAccessStore> ras = do_QueryInterface(outStream, &status);
if (NS_FAILED(status)) {
// mState = END_WRITE;
return NS_ERROR_FAILURE;
}
PRIntn offset ;
ras->Tell(&offset) ;
// printf(" offset is %d \n", offset) ;
PRUint32 numWritten;
rv = outStream->Write(buf, sizeof buf, &numWritten);
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
NS_ASSERTION(numWritten == sizeof buf, "Write() bug?");
streamLength = startingOffset + sizeof buf;
rv = outStream->Close();
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't close channel");
delete counterStream;
// Stored length must track the furthest byte written.
record->GetStoredContentLength(&len) ;
if(len != streamLength)
printf(" offset = %d is wrong, filesize = %d\n", startingOffset, len) ;
}
/*
rv = record->NewChannel(0, getter_AddRefs(channel));
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
startingOffset = 208;
rv = channel->OpenOutputStream(startingOffset, getter_AddRefs(outStream));
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
counterStream = new CounterStream(startingOffset);
counterStream->Read(buf, sizeof buf);
nsresult status ;
nsCOMPtr<nsIRandomAccessStore> ras = do_QueryInterface(outStream, &status);
if (NS_FAILED(status)) {
// mState = END_WRITE;
return NS_ERROR_FAILURE;
}
PRIntn offset = 0 ;
ras->Tell(&offset) ;
printf(" offset is %d \n", offset) ;
PRUint32 numWritten;
rv = outStream->Write(buf, sizeof buf, &numWritten);
NS_ASSERTION(NS_SUCCEEDED(rv), " ");
NS_ASSERTION(numWritten == sizeof buf, "Write() bug?");
streamLength = startingOffset + sizeof buf;
rv = outStream->Close();
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't close channel");
delete counterStream;
record->GetStoredContentLength(&len) ;
if(len != streamLength)
printf(" offset = %d is wrong, filesize = %d\n", startingOffset, len) ;
*/
delete randomStream;
// Verify the whole entry now matches the counter pattern end-to-end.
counterStream = new CounterStream(0);
TestReadStream(record, counterStream, streamLength);
WaitForEvents();
return NS_OK;
}
// Create entries in the network data cache, using random data for the
// key, the meta-data and the stored content data.  Each entry's content
// length is itself drawn from the (deterministic) random stream, so the
// read-back tests can regenerate the identical data from the seed alone.
// Also sanity-checks Contains(), GetNumEntries(), GetRecordID() and
// GetStorageInUse() along the way.
nsresult
FillCache(nsINetDataCache *cache)
{
    nsresult rv;
    PRBool inCache;
    nsCOMPtr<nsINetDataCacheRecord> record;
    nsCOMPtr<nsIChannel> channel;
    nsCOMPtr<nsIOutputStream> outStream;
    char buf[1000];
    PRUint32 metaDataLength;
    char cacheKey[CACHE_KEY_LENGTH];
    char metaData[CACHE_METADATA_LENGTH];
    PRUint32 testNum;
    char *data;
    RandomStream *randomStream;
    // NOTE: the original captured PR_IntervalNow() before and after the
    // loop but never used either value; the dead locals were removed.
    for (testNum = 0; testNum < NUM_CACHE_ENTRIES; testNum++) {
        randomStream = new RandomStream(testNum);
        randomStream->Read(cacheKey, sizeof cacheKey);
        // No entry should be in cache until we add it
        rv = cache->Contains(cacheKey, sizeof cacheKey, &inCache);
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        NS_ASSERTION(!inCache, "nsINetDataCache::Contains error");
        rv = cache->GetCachedNetData(cacheKey, sizeof cacheKey, getter_AddRefs(record));
        NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't access record via opaque cache key");
        // Test nsINetDataCacheRecord::GetRecordID(); the IDs are saved so
        // TestRecordID()/TestEnumeration() can find the records later.
        rv = record->GetRecordID(&recordID[testNum]);
        NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get Record ID");
        // Test nsINetDataCache::GetNumEntries()
        PRUint32 numEntries = (PRUint32)-1;
        rv = cache->GetNumEntries(&numEntries);
        NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get number of cache entries");
        NS_ASSERTION(numEntries == testNum + 1, "GetNumEntries failure");
        // Record meta-data should be initially empty
        rv = record->GetMetaData(&metaDataLength, &data);
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        if ((metaDataLength != 0) || (data != 0))
            return NS_ERROR_FAILURE;
        // Store random data as meta-data
        randomStream->Read(metaData, sizeof metaData);
        record->SetMetaData(sizeof metaData, metaData);
        rv = record->NewChannel(0, getter_AddRefs(channel));
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        rv = channel->OpenOutputStream(0, getter_AddRefs(outStream));
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        PRUint32 beforeOccupancy;
        rv = cache->GetStorageInUse(&beforeOccupancy);
        NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get cache occupancy");
        // Content length is 0..65535 bytes, derived from the stream itself.
        int streamLength = randomStream->Next() & 0xffff;
        int remaining = streamLength;
        while (remaining) {
            PRUint32 numWritten;
            int amount = PR_MIN(sizeof buf, remaining);
            randomStream->Read(buf, amount);
            rv = outStream->Write(buf, amount, &numWritten);
            NS_ASSERTION(NS_SUCCEEDED(rv), " ");
            NS_ASSERTION(numWritten == (PRUint32)amount, "Write() bug?");
            remaining -= amount;
        }
        outStream->Close();
        // Occupancy is reported in KB; it must have grown by at least the
        // stream length (rounded down to KB).
        PRUint32 afterOccupancy;
        rv = cache->GetStorageInUse(&afterOccupancy);
        NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get cache occupancy");
        PRUint32 streamLengthInKB = streamLength >> 10;
        NS_ASSERTION((afterOccupancy - beforeOccupancy) >= streamLengthInKB,
                     "nsINetDataCache::GetStorageInUse() is busted");
        // *Now* there should be an entry in the cache
        rv = cache->Contains(cacheKey, sizeof cacheKey, &inCache);
        NS_ASSERTION(NS_SUCCEEDED(rv), " ");
        NS_ASSERTION(inCache, "nsINetDataCache::Contains error");
        delete randomStream;
    }
    return NS_OK;
}
// Auto-register all XPCOM components from the default component directory
// so the cache factory CID can be instantiated below.
nsresult NS_AutoregisterComponents()
{
    return nsComponentManager::AutoRegister(nsIComponentManager::NS_Startup,
                                            NULL /* default */);
}
// Load user preferences from $MOZILLA_FIVE_HOME/default_prefs.js (or
// ./default_prefs.js when the environment variable is unset).
// Returns true on success, false if the prefs service, file spec, or
// prefs file is unavailable.
PRBool initPref ()
{
    nsresult rv;
    NS_WITH_SERVICE(nsIPref, prefPtr, kPrefCID, &rv);
    if (NS_FAILED(rv))
        return false;
    nsCOMPtr<nsIFileSpec> fileSpec;
    rv = NS_NewFileSpec (getter_AddRefs(fileSpec));
    if (NS_FAILED(rv))
        return false;
    // BUG FIX: PR_GetEnv returns null when the variable is unset; the
    // original passed that null straight to the nsCString constructor.
    const char *fiveHome = PR_GetEnv ("MOZILLA_FIVE_HOME");
    nsCString defaultPrefFile (fiveHome ? fiveHome : "");
    if (defaultPrefFile.Length())
        defaultPrefFile += "/";
    else
        defaultPrefFile = "./";
    defaultPrefFile += "default_prefs.js";
    fileSpec->SetUnixStyleFilePath (defaultPrefFile.GetBuffer());
    PRBool exists = false;
    fileSpec->Exists(&exists);
    if (exists)
        prefPtr->ReadUserPrefsFrom(fileSpec);
    else
        return false;
    return true;
}
// Test driver: creates the disk cache component, clears it, then runs the
// fill/read/record-ID/enumeration/truncation/offset-write test suite and
// finally verifies that RemoveAll() returns the cache to its starting
// occupancy.
int
main(int argc, char* argv[])
{
initPref() ;
nsresult rv;
nsCOMPtr<nsINetDataCache> cache;
rv = NS_AutoregisterComponents();
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't register XPCOM components");
rv = nsComponentManager::CreateInstance(kDiskCacheCID, nsnull,
NS_GET_IID(nsINetDataCache),
getter_AddRefs(cache));
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't create memory cache factory");
InitQueue();
// NOTE(review): 'description' is allocated by GetDescription and never
// freed here -- harmless for a test program.
PRUnichar* description;
rv = cache->GetDescription(&description);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get cache description");
nsCAutoString descStr(description);
printf("Testing: %s\n", descStr.GetBuffer());
// Start from an empty cache and remember the baseline occupancy.
rv = cache->RemoveAll();
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't clear cache");
PRUint32 startOccupancy;
rv = cache->GetStorageInUse(&startOccupancy);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get cache occupancy");
PRUint32 numEntries = (PRUint32)-1;
rv = cache->GetNumEntries(&numEntries);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get number of cache entries");
NS_ASSERTION(numEntries == 0, "Couldn't clear cache");
rv = FillCache(cache);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't fill cache with random test data");
rv = TestRead(cache);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't read random test data from cache");
rv = TestRecordID(cache);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't index records using record ID");
rv = TestEnumeration(cache);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't successfully enumerate records");
rv = TestTruncation(cache);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't successfully truncate records");
rv = TestOffsetWrites(cache);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't successfully write to records using non-zero offsets");
// Tear everything down and confirm occupancy returns to the baseline.
rv = cache->RemoveAll();
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't clear cache");
rv = cache->GetNumEntries(&numEntries);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get number of cache entries");
NS_ASSERTION(numEntries == 0, "Couldn't clear cache");
PRUint32 endOccupancy;
rv = cache->GetStorageInUse(&endOccupancy);
NS_ASSERTION(NS_SUCCEEDED(rv), "Couldn't get cache occupancy");
NS_ASSERTION(startOccupancy == endOccupancy, "Cache occupancy not correctly computed ?");
return 0;
}

View File

@@ -0,0 +1,46 @@
# Generated automatically from Makefile.in by configure.
#
# The contents of this file are subject to the Netscape Public License
# Version 1.0 (the "NPL"); you may not use this file except in
# compliance with the NPL. You may obtain a copy of the NPL at
# http://www.mozilla.org/NPL/
#
# Software distributed under the NPL is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
# for the specific language governing rights and limitations under the
# NPL.
#
# The Initial Developer of this code under the NPL is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All Rights
# Reserved.
#
# Build configuration for the in-memory cache (nkmemcache_s static library).
DEPTH = ../../..
topsrcdir = @top_srcdir@
VPATH = @srcdir@
srcdir = @srcdir@
include $(DEPTH)/config/autoconf.mk
MODULE = nkcache
LIBRARY_NAME = nkmemcache_s
REQUIRES = nspr dbm
EXPORTS=nsMemCacheCID.h
# Sources for the memory-cache component.
CPPSRCS = \
nsMemCache.cpp \
nsMemCacheRecord.cpp \
nsMemCacheChannel.cpp \
$(NULL)
# we don't want the shared lib, but we want to force the creation of a
# static lib.
override NO_SHARED_LIB=1
override NO_STATIC_LIB=
include $(topsrcdir)/config/rules.mk

View File

@@ -0,0 +1,42 @@
#!nmake
#
# The contents of this file are subject to the Netscape Public License
# Version 1.0 (the "NPL"); you may not use this file except in
# compliance with the NPL. You may obtain a copy of the NPL at
# http://www.mozilla.org/NPL/
#
# Software distributed under the NPL is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
# for the specific language governing rights and limitations under the
# NPL.
#
# The Initial Developer of this code under the NPL is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All Rights
# Reserved.
# Windows (nmake) build for the in-memory cache static library.
DEPTH=..\..\..
include <$(DEPTH)/config/config.mak>
MODULE = nkcache
LIBRARY_NAME = nkmemcache_s
# Object files for the memory-cache component.
CPP_OBJS = \
.\$(OBJDIR)\nsMemCache.obj \
.\$(OBJDIR)\nsMemCacheRecord.obj \
.\$(OBJDIR)\nsMemCacheChannel.obj \
$(NULL)
EXPORTS=nsMemCacheCID.h
include <$(DEPTH)\config\rules.mak>
install:: $(LIBRARY)
$(MAKE_INSTALL) $(LIBRARY) $(DIST)\lib
clobber::
rm -rf $(OBJDIR)
rm -f $(DIST)\lib\$(LIBRARY_NAME).lib

View File

@@ -0,0 +1,334 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
/**
* nsMemCache is the implementation of an in-memory network-data
* cache, used to cache the responses to network retrieval commands.
* Each cache entry may contain both content, e.g. GIF image data, and
* associated metadata, e.g. HTTP headers. Each entry is indexed by
* two different keys: a record id number and an opaque key, which is
* created by the cache manager by combining the URI with a "secondary
* key", e.g. HTTP post data.
*/
#include "nsMemCache.h"
#include "nsMemCacheRecord.h"
#include "nsIGenericFactory.h"
#include "nsString.h"
#include "nsHashtable.h"
#include "nsHashtableEnumerator.h"
#include "nsEnumeratorUtils.h"
// Process-wide counter used to hand out unique record IDs across all
// nsMemCache instances.
PRInt32 nsMemCache::gRecordSerialNumber = 0;

// Construct an empty, enabled cache.  The hash table is allocated in
// Init() so construction itself cannot fail.
// BUG FIX: mNextCache was never initialized, so GetNextCache() could
// AddRef/return a garbage pointer before SetNextCache() was called.
nsMemCache::nsMemCache()
    : mNumEntries(0), mOccupancy(0), mEnabled(PR_TRUE),
      mNextCache(0), mHashTable(0)
{
    NS_INIT_REFCNT();
}
// Destructor: evict every record and free the hash table.  All records
// must have been released by clients by now, or RemoveAll() cannot fully
// empty the cache (caught by the assertion in debug builds).
nsMemCache::~nsMemCache()
{
nsresult rv;
rv = RemoveAll();
NS_ASSERTION(NS_SUCCEEDED(rv) && (mNumEntries == 0),
"Failure to shut down memory cache. "
"Somewhere, someone is holding references to at least one cache record");
delete mHashTable;
}
// Allocate the record hash table.  Must be called (and must succeed)
// before any other nsMemCache method is used.
nsresult
nsMemCache::Init()
{
    mHashTable = new nsHashtable(256);
    return mHashTable ? NS_OK : NS_ERROR_OUT_OF_MEMORY;
}
NS_IMPL_ISUPPORTS(nsMemCache, NS_GET_IID(nsINetDataCache))
// Return a human-readable name for this cache.  The caller owns the
// returned Unicode string (allocated by ToNewUnicode).
NS_IMETHODIMP
nsMemCache::GetDescription(PRUnichar * *aDescription)
{
nsAutoString description("Memory Cache");
*aDescription = description.ToNewUnicode();
if (!*aDescription)
return NS_ERROR_OUT_OF_MEMORY;
return NS_OK;
}
// Report whether an entry with the given opaque key exists in the cache.
// Does not create an entry (unlike GetCachedNetData).
NS_IMETHODIMP
nsMemCache::Contains(const char *aKey, PRUint32 aKeyLength, PRBool *aFound)
{
    NS_ENSURE_ARG(aFound);
    // Stack-allocate the probe key, consistent with GetCachedNetDataByID()
    // and Delete(); the original heap-allocated it for no benefit and had
    // to handle an extra OOM path.
    nsOpaqueKey opaqueKey(aKey, aKeyLength);
    *aFound = mHashTable->Exists(&opaqueKey);
    return NS_OK;
}
/**
 * Look up the cache record for the given opaque key, creating a new record
 * if none exists.  New records are entered into the hash table twice --
 * once under the opaque key and once under the raw bytes of their record
 * ID -- so they can also be found via GetCachedNetDataByID().
 * The returned record is AddRef'd for the caller.
 */
NS_IMETHODIMP
nsMemCache::GetCachedNetData(const char *aKey, PRUint32 aKeyLength,
                             nsINetDataCacheRecord* *aRecord)
{
    nsresult rv;
    nsMemCacheRecord* record = 0;
    nsOpaqueKey *opaqueKey2 = 0;
    nsOpaqueKey *opaqueKey3 = 0;
    nsOpaqueKey *opaqueKey;
    opaqueKey = new nsOpaqueKey(aKey, aKeyLength);
    if (!opaqueKey)
        goto out_of_memory;
    record = (nsMemCacheRecord*)mHashTable->Get(opaqueKey);
    delete opaqueKey;
    // No existing cache database entry was found. Create a new one.
    // This requires two mappings in the hash table:
    //    Record ID  ==> record
    //    Opaque key ==> record
    if (!record) {
        record = new nsMemCacheRecord;
        if (!record)
            goto out_of_memory;
        rv = record->Init(aKey, aKeyLength, ++gRecordSerialNumber, this);
        if (NS_FAILED(rv)) goto out_of_memory;
        // Index the record by opaque key
        opaqueKey2 = new nsOpaqueKey(record->mKey, record->mKeyLength);
        if (!opaqueKey2) goto out_of_memory;
        mHashTable->Put(opaqueKey2, record);
        // Index the record by it's record ID
        char *recordIDbytes = NS_REINTERPRET_CAST(char *, &record->mRecordID);
        opaqueKey3 = new nsOpaqueKey(recordIDbytes,
                                     sizeof record->mRecordID);
        if (!opaqueKey3) {
            // Clean up the first mapping from the hash table.
            // BUG FIX: the original removed using 'opaqueKey', which was
            // deleted above (use-after-free); the mapping was entered
            // under opaqueKey2.
            mHashTable->Remove(opaqueKey2);
            goto out_of_memory;
        }
        mHashTable->Put(opaqueKey3, record);
        // The hash table holds on to the record
        record->AddRef();
        // Keys may be deleted after Put -- presumably nsHashtable clones
        // them; TODO confirm.
        delete opaqueKey2;
        delete opaqueKey3;
        mNumEntries++;
    }
    record->AddRef();
    *aRecord = record;
    return NS_OK;
 out_of_memory:
    delete opaqueKey2;
    delete opaqueKey3;
    delete record;
    return NS_ERROR_OUT_OF_MEMORY;
}
// Look up a record by its integer record ID.  The ID's raw bytes are used
// as an opaque hash key (the second mapping created by GetCachedNetData).
// Fails (without creating anything) if no such record exists.
NS_IMETHODIMP
nsMemCache::GetCachedNetDataByID(PRInt32 RecordID,
nsINetDataCacheRecord* *aRecord)
{
nsOpaqueKey opaqueKey(NS_REINTERPRET_CAST(const char *, &RecordID),
sizeof RecordID);
*aRecord = (nsINetDataCacheRecord*)mHashTable->Get(&opaqueKey);
if (*aRecord) {
NS_ADDREF(*aRecord);
return NS_OK;
}
return NS_ERROR_FAILURE;
}
// Remove a record from the cache: drop both hash-table mappings (record ID
// and opaque key) and release the reference the table held on the record.
// Called by nsMemCacheRecord (friend) when a record is deleted.
NS_METHOD
nsMemCache::Delete(nsMemCacheRecord* aRecord)
{
nsMemCacheRecord *removedRecord;
char *recordIDbytes = NS_REINTERPRET_CAST(char *, &aRecord->mRecordID);
nsOpaqueKey opaqueRecordIDKey(recordIDbytes,
sizeof aRecord->mRecordID);
removedRecord = (nsMemCacheRecord*)mHashTable->Remove(&opaqueRecordIDKey);
NS_ASSERTION(removedRecord == aRecord, "memory cache database inconsistent");
nsOpaqueKey opaqueKey(aRecord->mKey, aRecord->mKeyLength);
removedRecord = (nsMemCacheRecord*)mHashTable->Remove(&opaqueKey);
NS_ASSERTION(removedRecord == aRecord, "memory cache database inconsistent");
// Balance the AddRef made when the record was entered into the table.
aRecord->Release();
mNumEntries--;
return NS_OK;
}
// Whether this cache participates in lookups (MEM cache can be bypassed).
NS_IMETHODIMP
nsMemCache::GetEnabled(PRBool *aEnabled)
{
NS_ENSURE_ARG(aEnabled);
*aEnabled = mEnabled;
return NS_OK;
}
// Enable or disable this cache.
NS_IMETHODIMP
nsMemCache::SetEnabled(PRBool aEnabled)
{
mEnabled = aEnabled;
return NS_OK;
}
// Identify this cache as the in-memory flavor.
NS_IMETHODIMP
nsMemCache::GetFlags(PRUint32 *aFlags)
{
NS_ENSURE_ARG(aFlags);
*aFlags = MEMORY_CACHE;
return NS_OK;
}
// Current number of entries (maintained by GetCachedNetData/Delete).
NS_IMETHODIMP
nsMemCache::GetNumEntries(PRUint32 *aNumEntries)
{
NS_ENSURE_ARG(aNumEntries);
*aNumEntries = mNumEntries;
return NS_OK;
}
// Static capacity limit, in entries (see MEM_CACHE_MAX_ENTRIES).
NS_IMETHODIMP
nsMemCache::GetMaxEntries(PRUint32 *aMaxEntries)
{
NS_ENSURE_ARG(aMaxEntries);
*aMaxEntries = MEM_CACHE_MAX_ENTRIES;
return NS_OK;
}
// Enumeration callback: convert a hash-table entry into an nsISupports for
// the enumerator.  Each record appears in the table twice (opaque key and
// record-ID key); entries reached via their record-ID key are filtered out
// by returning failure, so every record is enumerated exactly once.
static NS_METHOD
HashEntryConverter(nsHashKey *aKey, void *aValue,
void *unused, nsISupports **retval)
{
nsMemCacheRecord *record;
nsOpaqueKey *opaqueKey;
record = (nsMemCacheRecord*)aValue;
opaqueKey = (nsOpaqueKey*)aKey;
// Hash table keys that index cache entries by their record ID
// shouldn't be enumerated.
// NOTE(review): an opaque key whose length happens to equal
// sizeof(PRInt32) would be misclassified here -- the debug assertion
// below would catch it.
if ((opaqueKey->GetKeyLength() == sizeof(PRInt32))) {
#ifdef DEBUG
PRInt32 recordID;
record->GetRecordID(&recordID);
NS_ASSERTION(*((PRInt32*)opaqueKey->GetKey()) == recordID,
"Key has incorrect key length");
#endif
return NS_ERROR_FAILURE;
}
NS_IF_ADDREF(record);
*retval = NS_STATIC_CAST(nsISupports*, record);
return NS_OK;
}
// Create an enumerator over every record in the cache (record-ID aliases
// are filtered out by HashEntryConverter).
// BUG FIX: the original ignored NS_NewHashtableEnumerator's return value,
// passing a null iterator to the adapter on failure.
NS_IMETHODIMP
nsMemCache::NewCacheEntryIterator(nsISimpleEnumerator* *aIterator)
{
    nsCOMPtr<nsIEnumerator> iterator;
    NS_ENSURE_ARG(aIterator);
    nsresult rv = NS_NewHashtableEnumerator(mHashTable, HashEntryConverter,
                                            mHashTable, getter_AddRefs(iterator));
    if (NS_FAILED(rv))
        return rv;
    return NS_NewAdapterEnumerator(aIterator, iterator);
}
// Return the next cache in the lookup chain (AddRef'd), or null if this is
// the last cache.
// BUG FIX: mNextCache may legitimately be null; the original's
// unconditional NS_ADDREF would dereference a null pointer.
NS_IMETHODIMP
nsMemCache::GetNextCache(nsINetDataCache* *aNextCache)
{
    NS_ENSURE_ARG(aNextCache);
    *aNextCache = mNextCache;
    NS_IF_ADDREF(*aNextCache);
    return NS_OK;
}
// Link this cache to the next cache in the lookup chain.
// NOTE(review): stores a raw pointer without AddRef -- presumably the
// cache manager owns the chain and outlives this cache; confirm to rule
// out a dangling pointer.
NS_IMETHODIMP
nsMemCache::SetNextCache(nsINetDataCache* aNextCache)
{
mNextCache = aNextCache;
return NS_OK;
}
// Report total content storage in use, in kilobytes.  mOccupancy is kept
// in bytes (updated via nsMemCacheChannel::NotifyStorageInUse).
NS_IMETHODIMP
nsMemCache::GetStorageInUse(PRUint32 *aStorageInUse)
{
NS_ENSURE_ARG(aStorageInUse);
// Convert from bytes to KB
*aStorageInUse = (mOccupancy >> 10);
return NS_OK;
}
// Delete every record in the cache, adjusting mOccupancy as each record's
// content is released.  If any individual Delete() fails, the sweep
// continues and NS_ERROR_FAILURE is returned at the end.
NS_IMETHODIMP
nsMemCache::RemoveAll(void)
{
PRBool failed;
nsCOMPtr<nsISimpleEnumerator> iterator;
nsCOMPtr<nsISupports> recordSupports;
nsCOMPtr<nsINetDataCacheRecord> record;
nsresult rv;
failed = PR_FALSE;
rv = NewCacheEntryIterator(getter_AddRefs(iterator));
if (NS_FAILED(rv))
return rv;
PRBool notDone;
while (1) {
rv = iterator->HasMoreElements(&notDone);
if (NS_FAILED(rv)) return rv;
if (!notDone)
break;
iterator->GetNext(getter_AddRefs(recordSupports));
record = do_QueryInterface(recordSupports);
// Drop the nsISupports ref promptly so only 'record' keeps it alive.
recordSupports = 0;
PRUint32 bytesUsed;
record->GetStoredContentLength(&bytesUsed);
rv = record->Delete();
if (NS_FAILED(rv)) {
// Record could not be deleted (presumably still referenced);
// leave its bytes counted and report failure at the end.
failed = PR_TRUE;
continue;
}
mOccupancy -= bytesUsed;
}
if (failed)
return NS_ERROR_FAILURE;
return NS_OK;
}

View File

@@ -0,0 +1,83 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
/**
* nsMemCache is the implementation of an in-memory network-data
* cache, used to cache the responses to network retrieval commands.
* Each cache entry may contain both content, e.g. GIF image data, and
* associated metadata, e.g. HTTP headers. Each entry is indexed by
* two different keys: a record id number and an opaque key, which is
* created by the cache manager by combining the URI with a "secondary
* key", e.g. HTTP post data.
*/
#ifndef _nsMemCache_h_
#define _nsMemCache_h_
#include "nsINetDataCache.h"
// Maximum number of URIs that may be resident in the cache
#define MEM_CACHE_MAX_ENTRIES 1000
// Tuning constants -- presumably the allocation granularity and per-entry
// size cap for stored content (used by the record/channel code, not here).
#define MEM_CACHE_SEGMENT_SIZE (1 << 12)
#define MEM_CACHE_MAX_ENTRY_SIZE (1 << 20)
class nsHashtable;
class nsMemCacheRecord;
// In-memory implementation of nsINetDataCache.  Records are indexed both
// by opaque key and by record ID in a single hash table.
class nsMemCache : public nsINetDataCache
{
public:
nsMemCache();
virtual ~nsMemCache();
// Allocates the hash table; must succeed before any other use.
nsresult Init();
// nsISupports methods
NS_DECL_ISUPPORTS
// nsINetDataCache methods
NS_DECL_NSINETDATACACHE
// Factory
static NS_METHOD nsMemCacheConstructor(nsISupports *aOuter, REFNSIID aIID,
void **aResult);
protected:
PRUint32 mNumEntries;
PRUint32 mOccupancy; // Memory used, in bytes
PRBool mEnabled; // If false, bypass mem cache
// Next cache in the lookup chain; held weakly (no AddRef in setter).
nsINetDataCache* mNextCache;
// Mapping from either opaque key or record ID to nsMemCacheRecord
nsHashtable* mHashTable;
// Used to assign record ID's
static PRInt32 gRecordSerialNumber;
// Removes both hash mappings for aRecord; called by the record itself.
NS_METHOD Delete(nsMemCacheRecord* aRecord);
friend class nsMemCacheRecord;
friend class nsMemCacheChannel;
};
#endif // _nsMemCache_h_

View File

@@ -0,0 +1,36 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
// XPCOM Class ID for the network data in-memory cache
#ifndef nsMEMCACHECID_h__
#define nsMEMCACHECID_h__
// {e4710560-7de2-11d3-90cb-0040056a906e}
#define NS_MEM_CACHE_FACTORY_CID \
{ \
0xe4710560, \
0x7de2, \
0x11d3, \
{0x90, 0xcb, 0x00, 0x40, 0x05, 0x6a, 0x90, 0x6e} \
}
#endif // nsMEMCACHECID_h__

View File

@@ -0,0 +1,464 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
#include "nsMemCache.h"
#include "nsMemCacheChannel.h"
#include "nsIStreamListener.h"
#include "nsIChannel.h"
#include "nsIStorageStream.h"
#include "nsIOutputStream.h"
#include "nsIServiceManager.h"
#include "nsIEventQueueService.h"
#include "nsNetUtil.h"
#include "nsILoadGroup.h"
static NS_DEFINE_CID(kIOServiceCID, NS_IOSERVICE_CID);
static NS_DEFINE_CID(kEventQueueService, NS_EVENTQUEUESERVICE_CID);
NS_IMPL_ISUPPORTS(nsMemCacheChannel, NS_GET_IID(nsIChannel))
// Propagate a change in stored bytes (delta may be negative) to the owning
// cache's occupancy counter, which backs nsMemCache::GetStorageInUse().
void
nsMemCacheChannel::NotifyStorageInUse(PRInt32 aBytesUsed)
{
mRecord->mCache->mOccupancy += aBytesUsed;
}
/**
 * This class acts as an adaptor around a synchronous input stream to add async
 * read capabilities. It adds methods for initiating, suspending, resuming and
 * cancelling async reads.
 *
 * Ownership: holds a strong reference to its nsMemCacheChannel (manual
 * AddRef/Release, since a concrete class cannot go in a nsCOMPtr here) and
 * clears the channel's weak mAsyncReadStream back-pointer on destruction.
 */
class AsyncReadStreamAdaptor : public nsIInputStream {
public:
    AsyncReadStreamAdaptor(nsMemCacheChannel* aChannel, nsIInputStream *aSyncStream):
        mSyncStream(aSyncStream), mDataAvailCursor(0),
        mRemaining(0), mChannel(aChannel), mAvailable(0), mAborted(false), mSuspended(false)
    {
        NS_INIT_REFCNT();
        NS_ADDREF(mChannel);
    }

    virtual ~AsyncReadStreamAdaptor() {
        // Break the channel's non-owning link before dropping our reference.
        mChannel->mAsyncReadStream = 0;
        NS_RELEASE(mChannel);
    }

    NS_DECL_ISUPPORTS

    // Pending while the caller's byte budget is unexhausted and not aborted.
    nsresult
    IsPending(PRBool* aIsPending) {
        *aIsPending = (mRemaining != 0) && !mAborted;
        return NS_OK;
    }

    // Abort the read and tell the listener the binding was aborted.
    nsresult
    Cancel(void) {
        mAborted = true;
        return mStreamListener->OnStopRequest(mChannel, mContext, NS_BINDING_ABORTED, nsnull);
    }

    // Stop pumping listener events until Resume() is called.
    nsresult
    Suspend(void) { mSuspended = true; return NS_OK; }

    nsresult
    Resume(void) {
        if (!mSuspended)
            return NS_ERROR_FAILURE;
        mSuspended = false;
        return NextListenerEvent();
    }

    // Number of bytes that can be Read() without blocking, i.e. bytes already
    // announced to the listener via OnDataAvailable but not yet consumed.
    NS_IMETHOD
    Available(PRUint32 *aNumBytes) {
        // BUG FIX: this method formerly returned mAvailable as the nsresult
        // itself and never stored the count into *aNumBytes.
        *aNumBytes = mAvailable;
        return NS_OK;
    }

    // Read up to aCount bytes, clamped to the announced window; once the
    // window drains, schedule the next listener event (unless suspended).
    NS_IMETHOD
    Read(char* aBuf, PRUint32 aCount, PRUint32 *aBytesRead) {
        if (mAborted)
            return NS_ERROR_ABORT;
        *aBytesRead = 0;
        aCount = PR_MIN(aCount, mAvailable);
        nsresult rv = mSyncStream->Read(aBuf, aCount, aBytesRead);
        mAvailable -= *aBytesRead;
        if (NS_FAILED(rv) && (rv != NS_BASE_STREAM_WOULD_BLOCK)) {
            Fail();
            return rv;
        }
        if (!mSuspended && !mAvailable) {
            rv = NextListenerEvent();
            if (NS_FAILED(rv)) {
                Fail();
                return rv;
            }
        }
        return NS_OK;
    }

    // Release the underlying stream and all listener/context references.
    NS_IMETHOD
    Close() {
        nsresult rv = mSyncStream->Close();
        mSyncStream = 0;
        mContext = 0;
        mStreamListener = 0;
        return rv;
    }

    // Begin the async read: proxy aListener onto this thread's event queue,
    // fire OnStartRequest, and schedule the first OnDataAvailable.
    nsresult
    AsyncRead(PRUint32 aStartPosition, PRInt32 aReadCount,
              nsISupports* aContext, nsIStreamListener* aListener) {
        nsresult rv;
        nsIEventQueue *eventQ;
        mContext = aContext;
        mStreamListener = aListener;
        mRemaining = aReadCount;
        NS_WITH_SERVICE(nsIIOService, serv, kIOServiceCID, &rv);
        if (NS_FAILED(rv)) return rv;
        NS_WITH_SERVICE(nsIEventQueueService, eventQService, kEventQueueService, &rv);
        if (NS_FAILED(rv)) return rv;
        rv = eventQService->GetThreadEventQueue(PR_CurrentThread(), &eventQ);
        if (NS_FAILED(rv)) return rv;
        // Replace the raw listener with an async proxy delivering on eventQ.
        rv = NS_NewAsyncStreamListener(aListener, eventQ,
                                       getter_AddRefs(mStreamListener));
        NS_RELEASE(eventQ);
        if (NS_FAILED(rv)) return rv;
        rv = mStreamListener->OnStartRequest(mChannel, aContext);
        if (NS_FAILED(rv)) return rv;
        return NextListenerEvent();
    }

protected:
    // Abort the read and tell the listener the binding failed.
    nsresult
    Fail(void) {
        mAborted = true;
        return mStreamListener->OnStopRequest(mChannel, mContext, NS_BINDING_FAILED, nsnull);
    }

    // Announce the next chunk via OnDataAvailable, or fire OnStopRequest
    // (releasing the outstanding self-reference) when data is exhausted.
    nsresult
    NextListenerEvent() {
        PRUint32 available;
        nsresult rv = mSyncStream->Available(&available);
        if (NS_FAILED(rv)) return rv;
        available -= mAvailable;
        available = PR_MIN(available, mRemaining);
        if (available) {
            PRUint32 size = PR_MIN(available, MEM_CACHE_SEGMENT_SIZE);
            rv = mStreamListener->OnDataAvailable(mChannel, mContext, this,
                                                  mDataAvailCursor, size);
            mDataAvailCursor += size;
            mRemaining -= size;
            mAvailable += size;
            return rv;
        } else {
            rv = mStreamListener->OnStopRequest(mChannel, mContext, NS_OK, nsnull);
            AsyncReadStreamAdaptor* thisAlias = this;
            NS_RELEASE(thisAlias);
            return rv;
        }
    }

private:
    nsCOMPtr<nsISupports> mContext;              // Opaque context passed to AsyncRead()
    nsCOMPtr<nsIStreamListener> mStreamListener; // Stream listener that has been proxied
    nsCOMPtr<nsIInputStream> mSyncStream;        // Underlying synchronous stream that is
                                                 // being converted to an async stream
    PRUint32 mDataAvailCursor;                   // Offset of the next OnDataAvailable
    PRUint32 mRemaining;    // Size of AsyncRead request less bytes for
                            // consumer OnDataAvailable's that were fired
    PRUint32 mAvailable;    // Number of bytes for which OnDataAvailable fired
    nsMemCacheChannel* mChannel; // Associated memory cache channel, strong link
                                 // but can not use nsCOMPtr
    bool mAborted;      // Abort() has been called
    bool mSuspended;    // Suspend() has been called
};
NS_IMPL_ISUPPORTS(AsyncReadStreamAdaptor, NS_GET_IID(nsIInputStream))
// The only purpose of this output stream wrapper is to adjust the cache's
// overall occupancy as new data flows into the cache entry.
class MemCacheWriteStreamWrapper : public nsIOutputStream {
public:
    MemCacheWriteStreamWrapper(nsMemCacheChannel* aChannel, nsIOutputStream *aBaseStream):
        mBaseStream(aBaseStream), mChannel(aChannel)
    {
        NS_INIT_REFCNT();
        NS_ADDREF(mChannel);
    }

    virtual ~MemCacheWriteStreamWrapper() { NS_RELEASE(mChannel); };

    NS_DECL_ISUPPORTS

    // Factory helper: wrap aBaseStream, returning an AddRef'd wrapper.
    static nsresult
    Create(nsMemCacheChannel* aChannel, nsIOutputStream *aBaseStream, nsIOutputStream* *aWrapper) {
        MemCacheWriteStreamWrapper *instance =
            new MemCacheWriteStreamWrapper(aChannel, aBaseStream);
        if (!instance)
            return NS_ERROR_OUT_OF_MEMORY;
        NS_ADDREF(instance);
        *aWrapper = instance;
        return NS_OK;
    }

    // Forward the write, then charge whatever actually landed in the base
    // stream against the cache occupancy -- even when the write failed.
    NS_IMETHOD
    Write(const char *aBuffer, PRUint32 aCount, PRUint32 *aNumWritten) {
        *aNumWritten = 0;
        nsresult status = mBaseStream->Write(aBuffer, aCount, aNumWritten);
        mChannel->NotifyStorageInUse(*aNumWritten);
        return status;
    }

    NS_IMETHOD
    Close() { return mBaseStream->Close(); }

    NS_IMETHOD
    Flush() { return mBaseStream->Flush(); }

private:
    nsCOMPtr<nsIOutputStream> mBaseStream; // the real storage stream
    nsMemCacheChannel* mChannel;           // strong (manually counted) link
};
NS_IMPL_ISUPPORTS(MemCacheWriteStreamWrapper, NS_GET_IID(nsIOutputStream))
// Construct a channel over aRecord; the record counts open channels so it
// can refuse deletion while any are outstanding.  aLoadGroup is accepted for
// API symmetry but unused here.
// BUG FIX: mAsyncReadStream (a raw, non-owning pointer) was previously left
// uninitialized, so IsPending/Cancel/Suspend/Resume could dereference
// garbage before the first AsyncRead() call.
nsMemCacheChannel::nsMemCacheChannel(nsMemCacheRecord *aRecord, nsILoadGroup *aLoadGroup)
    : mRecord(aRecord), mAsyncReadStream(0)
{
    NS_INIT_REFCNT();
    mRecord->mNumChannels++;
}
// Balance the channel count taken in the constructor so the record knows
// when it may safely be deleted.
nsMemCacheChannel::~nsMemCacheChannel()
{
    mRecord->mNumChannels--;
}
// A request is pending only while an async read adaptor exists and reports
// itself pending; with no adaptor the answer is simply "no".
NS_IMETHODIMP
nsMemCacheChannel::IsPending(PRBool* aIsPending)
{
    *aIsPending = PR_FALSE;
    return mAsyncReadStream ? mAsyncReadStream->IsPending(aIsPending) : NS_OK;
}
// Cancel the in-flight async read, if any.
NS_IMETHODIMP
nsMemCacheChannel::Cancel(void)
{
    return mAsyncReadStream ? mAsyncReadStream->Cancel() : NS_ERROR_FAILURE;
}

// Pause delivery of listener events for the in-flight async read.
NS_IMETHODIMP
nsMemCacheChannel::Suspend(void)
{
    return mAsyncReadStream ? mAsyncReadStream->Suspend() : NS_ERROR_FAILURE;
}

// Resume a previously suspended async read.
NS_IMETHODIMP
nsMemCacheChannel::Resume(void)
{
    return mAsyncReadStream ? mAsyncReadStream->Resume() : NS_ERROR_FAILURE;
}
// nsIChannel::GetOriginalURI -- intentionally unimplemented; the cache
// manager's wrapper channel supplies URI information.
NS_IMETHODIMP
nsMemCacheChannel::GetOriginalURI(nsIURI * *aURI)
{
    // Not required
    return NS_ERROR_NOT_IMPLEMENTED;
}
// nsIChannel::GetURI -- intentionally unimplemented; asserts because no
// caller is expected to reach this object directly.
NS_IMETHODIMP
nsMemCacheChannel::GetURI(nsIURI * *aURI)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Open a synchronous input stream positioned at aStartPosition over the
// record's storage.  Only one input stream per channel is permitted;
// aReadCount is not consulted here.
NS_IMETHODIMP
nsMemCacheChannel::OpenInputStream(PRUint32 aStartPosition, PRInt32 aReadCount,
                                   nsIInputStream* *aResult)
{
    nsresult rv;
    NS_ENSURE_ARG(aResult);
    if (mInputStream)
        return NS_ERROR_NOT_AVAILABLE;
    rv = mRecord->mStorageStream->NewInputStream(aStartPosition, getter_AddRefs(mInputStream));
    // BUG FIX: on failure the old code fell through and NS_ADDREF'd a null
    // stream pointer.
    if (NS_FAILED(rv)) return rv;
    *aResult = mInputStream;
    NS_ADDREF(*aResult);
    return NS_OK;
}
// Open an output stream over the record's storage, wrapped so that bytes
// written are charged against the cache's occupancy.
NS_IMETHODIMP
nsMemCacheChannel::OpenOutputStream(PRUint32 startPosition, nsIOutputStream* *aResult)
{
    NS_ENSURE_ARG(aResult);

    PRUint32 lengthBefore;
    mRecord->mStorageStream->GetLength(&lengthBefore);

    nsCOMPtr<nsIOutputStream> rawStream;
    nsresult rv = mRecord->mStorageStream->GetOutputStream(startPosition, getter_AddRefs(rawStream));
    if (NS_FAILED(rv)) return rv;

    // Opening before the old end truncates the storage, so credit the cache
    // with the (negative) size change.
    if (startPosition < lengthBefore)
        NotifyStorageInUse(startPosition - lengthBefore);

    return MemCacheWriteStreamWrapper::Create(this, rawStream, aResult);
}
// nsIChannel::AsyncOpen -- intentionally unimplemented; consumers use
// AsyncRead() directly.
NS_IMETHODIMP
nsMemCacheChannel::AsyncOpen(nsIStreamObserver *observer, nsISupports *ctxt)
{
    // Not required
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Start an asynchronous read of the cached data: open the synchronous input
// stream and wrap it in an AsyncReadStreamAdaptor, which proxies listener
// events onto this thread's event queue.
NS_IMETHODIMP
nsMemCacheChannel::AsyncRead(PRUint32 aStartPosition, PRInt32 aReadCount,
                             nsISupports *aContext, nsIStreamListener *aListener)
{
    nsCOMPtr<nsIInputStream> inputStream;
    nsresult rv = OpenInputStream(aStartPosition, aReadCount, getter_AddRefs(inputStream));
    if (NS_FAILED(rv)) return rv;
    AsyncReadStreamAdaptor *asyncReadStreamAdaptor;
    asyncReadStreamAdaptor = new AsyncReadStreamAdaptor(this, inputStream);
    if (!asyncReadStreamAdaptor)
        return NS_ERROR_OUT_OF_MEMORY;
    // The adaptor is refcounted; mAsyncReadStream is a weak back-pointer that
    // the adaptor clears from its own destructor.
    NS_ADDREF(asyncReadStreamAdaptor);
    mAsyncReadStream = asyncReadStreamAdaptor;
    rv = asyncReadStreamAdaptor->AsyncRead(aStartPosition, aReadCount, aContext, aListener);
    // NOTE(review): `delete` bypasses the refcount; NS_RELEASE would be the
    // conventional cleanup.  Confirm the adaptor's internal failure paths
    // have not already released a reference before changing this.
    if (NS_FAILED(rv))
        delete asyncReadStreamAdaptor;
    return rv;
}
// nsIChannel::AsyncWrite -- intentionally unimplemented; cache writes go
// through OpenOutputStream().
NS_IMETHODIMP
nsMemCacheChannel::AsyncWrite(nsIInputStream *fromStream, PRUint32 startPosition,
                              PRInt32 writeCount, nsISupports *ctxt,
                              nsIStreamObserver *observer)
{
    // Not required to be implemented
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Load attributes live on the cache manager's wrapper channel, not here.
NS_IMETHODIMP
nsMemCacheChannel::GetLoadAttributes(nsLoadFlags *aLoadAttributes)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHODIMP
nsMemCacheChannel::SetLoadAttributes(nsLoadFlags aLoadAttributes)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Return the MIME type of the cached content.  Normally supplied by the
// cache manager's wrapper channel.
// Not required to be implemented, since it is implemented by cache manager
// FIXME - lying for the purpose of testing
NS_IMETHODIMP
nsMemCacheChannel::GetContentType(char* *aContentType)
{
    *aContentType = strdup("text/html");
    // BUG FIX: strdup can fail; report OOM instead of handing out null.
    if (!*aContentType)
        return NS_ERROR_OUT_OF_MEMORY;
    return NS_OK;
}
// Content length is supplied by the cache manager's wrapper channel.
NS_IMETHODIMP
nsMemCacheChannel::GetContentLength(PRInt32 *aContentLength)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Return the channel's owner, AddRef'd for the caller (may be null).
NS_IMETHODIMP
nsMemCacheChannel::GetOwner(nsISupports* *aOwner)
{
    nsISupports* owner = mOwner.get();
    NS_IF_ADDREF(owner);
    *aOwner = owner;
    return NS_OK;
}
// Store the channel owner; the nsCOMPtr takes a strong reference.
NS_IMETHODIMP
nsMemCacheChannel::SetOwner(nsISupports* aOwner)
{
    // Not required to be implemented, since it is implemented by cache manager
    mOwner = aOwner;
    return NS_OK;
}
// Load-group and notification-callback plumbing lives on the cache
// manager's wrapper channel; these four accessors are deliberate stubs that
// assert if reached.
NS_IMETHODIMP
nsMemCacheChannel::GetLoadGroup(nsILoadGroup* *aLoadGroup)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHODIMP
nsMemCacheChannel::SetLoadGroup(nsILoadGroup* aLoadGroup)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHODIMP
nsMemCacheChannel::GetNotificationCallbacks(nsIInterfaceRequestor* *aNotificationCallbacks)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHODIMP
nsMemCacheChannel::SetNotificationCallbacks(nsIInterfaceRequestor* aNotificationCallbacks)
{
    // Not required to be implemented, since it is implemented by cache manager
    NS_ASSERTION(0, "nsMemCacheChannel method unexpectedly called");
    return NS_ERROR_NOT_IMPLEMENTED;
}

View File

@@ -0,0 +1,61 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
#ifndef _nsMemCacheChannel_h_
#define _nsMemCacheChannel_h_
#include "nsMemCacheRecord.h"
#include "nsIChannel.h"
#include "nsIInputStream.h"
#include "nsCOMPtr.h"
class AsyncReadStreamAdaptor;
// nsIChannel implementation that reads and writes a single in-memory cache
// record.  Many nsIChannel methods are stubs because the cache manager wraps
// this channel in its own proxy.
class nsMemCacheChannel : public nsIChannel
{
public:
    // Constructors and Destructor
    nsMemCacheChannel(nsMemCacheRecord *aRecord, nsILoadGroup *aLoadGroup);
    virtual ~nsMemCacheChannel();
    // Declare nsISupports methods
    NS_DECL_ISUPPORTS
    // Declare nsIRequest methods
    NS_DECL_NSIREQUEST
    // Declare nsIChannel methods
    NS_DECL_NSICHANNEL
protected:
    // Charge aBytesUsed (possibly negative) against the owning cache's
    // occupancy.
    void NotifyStorageInUse(PRInt32 aBytesUsed);
    nsCOMPtr<nsMemCacheRecord> mRecord;    // cache record backing this channel
    nsCOMPtr<nsIInputStream> mInputStream; // the single reader, once opened
    nsCOMPtr<nsISupports> mOwner;          // nsIChannel owner attribute
    AsyncReadStreamAdaptor* mAsyncReadStream; // non-owning pointer
    friend class MemCacheWriteStreamWrapper;
    friend class AsyncReadStreamAdaptor;
};
#endif // _nsMemCacheChannel_h_

View File

@@ -0,0 +1,164 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
#include "nsMemCache.h"
#include "nsMemCacheRecord.h"
#include "nsMemCacheChannel.h"
#include "nsIAllocator.h"
#include "nsStorageStream.h"
static NS_DEFINE_IID(kINetDataCacheRecord, NS_INETDATACACHERECORD_IID);
// Records are created empty by nsMemCache and become usable only after
// Init() supplies the key, record ID and owning cache.
// BUG FIX: mRecordID and mCache were previously left uninitialized until
// Init(), so methods invoked on a partially constructed record read garbage.
nsMemCacheRecord::nsMemCacheRecord()
    : mKey(0), mKeyLength(0), mRecordID(0), mMetaData(0), mMetaDataLength(0),
      mCache(0), mNumChannels(0)
{
    NS_INIT_REFCNT();
}
// Release the key and metadata buffers.  delete[] on a null pointer is a
// no-op, so no guards are required.
nsMemCacheRecord::~nsMemCacheRecord()
{
    delete[] mMetaData;
    delete[] mKey;
}
NS_IMPL_ISUPPORTS(nsMemCacheRecord, NS_GET_IID(nsINetDataCacheRecord))
// Copy the record's opaque key into a freshly allocated buffer.  The caller
// owns the returned buffer (nsAllocator) and receives its length in aLength.
NS_IMETHODIMP
nsMemCacheRecord::GetKey(PRUint32 *aLength, char **aResult)
{
    // BUG FIX: validate the length out-param as well as the buffer out-param.
    NS_ENSURE_ARG(aLength);
    NS_ENSURE_ARG(aResult);
    *aResult = (char *)nsAllocator::Alloc(mKeyLength);
    if (!*aResult)
        return NS_ERROR_OUT_OF_MEMORY;
    memcpy(*aResult, mKey, mKeyLength);
    *aLength = mKeyLength;
    return NS_OK;
}
// One-time setup: allocate backing storage and record this entry's identity.
// Called exactly once by nsMemCache before the record is handed out.
nsresult
nsMemCacheRecord::Init(const char *aKey, PRUint32 aKeyLength,
                       PRUint32 aRecordID, nsMemCache *aCache)
{
    NS_ASSERTION(!mKey, "Memory cache record key set multiple times");
    nsresult status = NS_NewStorageStream(MEM_CACHE_SEGMENT_SIZE, MEM_CACHE_MAX_ENTRY_SIZE,
                                          getter_AddRefs(mStorageStream));
    if (NS_FAILED(status)) return status;
    char* keyCopy = new char[aKeyLength];
    if (!keyCopy)
        return NS_ERROR_OUT_OF_MEMORY;
    memcpy(keyCopy, aKey, aKeyLength);
    mKey = keyCopy;
    mKeyLength = aKeyLength;
    mRecordID = aRecordID;
    mCache = aCache;
    return NS_OK;
}
// Return the record's numeric ID (an alternate key assigned by the cache).
NS_IMETHODIMP
nsMemCacheRecord::GetRecordID(PRInt32 *aRecordID)
{
    NS_ENSURE_ARG(aRecordID);
    *aRecordID = mRecordID;
    return NS_OK;
}
// Copy the record's metadata into a caller-owned (nsAllocator) buffer;
// *aResult is null and *aLength zero when no metadata has been set.
NS_IMETHODIMP
nsMemCacheRecord::GetMetaData(PRUint32 *aLength, char **aResult)
{
    NS_ENSURE_ARG(aResult);
    *aResult = 0;
    if (mMetaDataLength) {
        char* copy = (char*)nsAllocator::Alloc(mMetaDataLength);
        if (!copy)
            return NS_ERROR_OUT_OF_MEMORY;
        memcpy(copy, mMetaData, mMetaDataLength);
        *aResult = copy;
    }
    *aLength = mMetaDataLength;
    return NS_OK;
}
// Replace the record's metadata with a copy of aData (aLength bytes).
// BUG FIX: allocate the new buffer *before* discarding the old one.  The
// previous code deleted mMetaData first, so an allocation failure returned
// with mMetaData null while mMetaDataLength still held the stale length --
// a later GetMetaData would then memcpy from a null pointer.
NS_IMETHODIMP
nsMemCacheRecord::SetMetaData(PRUint32 aLength, const char *aData)
{
    char* newData = new char[aLength];
    if (!newData)
        return NS_ERROR_OUT_OF_MEMORY;
    memcpy(newData, aData, aLength);
    delete[] mMetaData;
    mMetaData = newData;
    mMetaDataLength = aLength;
    return NS_OK;
}
// Report the number of content bytes currently held in backing storage.
NS_IMETHODIMP
nsMemCacheRecord::GetStoredContentLength(PRUint32 *aStoredContentLength)
{
    NS_ENSURE_ARG(aStoredContentLength);
    return mStorageStream->GetLength(aStoredContentLength);
}
// Truncate (or extend) the stored content and adjust the cache's occupancy
// by the actual size change reported by the storage stream.
NS_IMETHODIMP
nsMemCacheRecord::SetStoredContentLength(PRUint32 aStoredContentLength)
{
    PRUint32 before, after;
    mStorageStream->GetLength(&before);
    nsresult rv = mStorageStream->SetLength(aStoredContentLength);
    if (NS_FAILED(rv)) return rv;
    mStorageStream->GetLength(&after);
    // (before - after) relies on unsigned wraparound when the stream grows;
    // the subtraction still yields the correct occupancy delta mod 2^32.
    mCache->mOccupancy -= (before - after);
    return NS_OK;
}
// Remove this record from its cache; refused while any channel is open on it.
NS_IMETHODIMP
nsMemCacheRecord::Delete(void)
{
    return mNumChannels ? NS_ERROR_NOT_AVAILABLE : mCache->Delete(this);
}
// In-memory records have no backing file, so no filename can be supplied.
NS_IMETHODIMP
nsMemCacheRecord::GetFilename(nsIFileSpec* *aFilename)
{
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Create a new, AddRef'd nsIChannel for reading/writing this record's data.
NS_IMETHODIMP
nsMemCacheRecord::NewChannel(nsILoadGroup *aLoadGroup, nsIChannel* *aResult)
{
    NS_ENSURE_ARG(aResult);
    nsMemCacheChannel* impl = new nsMemCacheChannel(this, aLoadGroup);
    if (!impl)
        return NS_ERROR_OUT_OF_MEMORY;
    NS_ADDREF(impl);
    *aResult = NS_STATIC_CAST(nsIChannel*, impl);
    return NS_OK;
}

View File

@@ -0,0 +1,65 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
*/
#ifndef _nsMemCacheRecord_h_
#define _nsMemCacheRecord_h_
#include "nsINetDataCacheRecord.h"
#include "nsCOMPtr.h"
class nsMemCache;
class nsIStorageStream;
// One entry in the in-memory cache: an opaque key, opaque metadata, and a
// storage stream holding the cached content.  Instances are created and
// initialized only by nsMemCache (constructors are protected).
class nsMemCacheRecord : public nsINetDataCacheRecord
{
public:
    // Declare interface methods
    NS_DECL_ISUPPORTS
    NS_DECL_NSINETDATACACHERECORD
protected:
    // Constructors and Destructor
    nsMemCacheRecord();
    virtual ~nsMemCacheRecord();
    // One-shot setup called by nsMemCache right after construction.
    nsresult Init(const char *aKey, PRUint32 aKeyLength,
                  PRUint32 aRecordID, nsMemCache *aCache);
    char* mKey; // opaque database key for this record
    PRUint32 mKeyLength; // length, in bytes, of mKey
    PRInt32 mRecordID; // An alternate key for this record
    char* mMetaData; // opaque URI metadata
    PRUint32 mMetaDataLength; // length, in bytes, of mMetaData
    nsMemCache* mCache; // weak pointer to the cache database
    // that this record inhabits
    nsCOMPtr<nsIStorageStream> mStorageStream;
    PRUint32 mNumChannels; // Count un-Release'ed nsIChannels
    friend class nsMemCache;
    friend class nsMemCacheChannel;
};
#endif // _nsMemCacheRecord_h_

51
mozilla/netwerk/cache/mgr/Makefile.in vendored Normal file
View File

@@ -0,0 +1,51 @@
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
#
# Build configuration for the cache manager static library (nkcachemgr_s).
DEPTH = ../../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = @srcdir@
include $(DEPTH)/config/autoconf.mk
MODULE = nkcache
LIBRARY_NAME = nkcachemgr_s
REQUIRES = nspr
# Keep this source list in sync with Makefile.win's CPP_OBJS.
CPPSRCS = \
nsCacheManager.cpp \
nsCachedNetData.cpp \
nsReplacementPolicy.cpp \
nsCacheEntryChannel.cpp \
$(NULL)
LOCAL_INCLUDES = -I$(srcdir)/../public -I$(srcdir)/../include
EXTRA_LIBS = $(NSPR_LIBS)
# we don't want the shared lib, but we want to force the creation of a
# static lib.
override NO_SHARED_LIB=1
override NO_STATIC_LIB=
include $(topsrcdir)/config/rules.mk

45
mozilla/netwerk/cache/mgr/Makefile.win vendored Executable file
View File

@@ -0,0 +1,45 @@
#!gmake
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
# Windows build of the cache manager static library (nkcachemgr_s).
DEPTH=..\..\..
include <$(DEPTH)/config/config.mak>
MODULE = nkcache
LIBRARY_NAME = nkcachemgr_s
# Keep this object list in sync with Makefile.in's CPPSRCS.
CPP_OBJS = \
.\$(OBJDIR)\nsCacheManager.obj \
.\$(OBJDIR)\nsCachedNetData.obj \
.\$(OBJDIR)\nsReplacementPolicy.obj \
.\$(OBJDIR)\nsCacheEntryChannel.obj \
$(NULL)
include <$(DEPTH)\config\rules.mak>
# Copy the freshly built static library into the distribution lib directory.
install:: $(LIBRARY)
$(MAKE_INSTALL) $(LIBRARY) $(DIST)\lib
clobber::
rm -rf $(OBJDIR)
rm -f $(DIST)\lib\$(LIBRARY_NAME).lib

View File

@@ -0,0 +1,260 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#include "nsCacheManager.h"
#include "nsCacheEntryChannel.h"
#include "nsIOutputStream.h"
#include "nsIIOService.h"
#include "nsIServiceManager.h"
#include "nsIStreamListener.h"
// Wrap aChannel (the raw cache-module channel) so reads and writes update
// the cache entry's bookkeeping; the entry counts open channels in
// mChannelCount.
nsCacheEntryChannel::nsCacheEntryChannel(nsCachedNetData* aCacheEntry, nsIChannel* aChannel,
                                         nsILoadGroup* aLoadGroup):
    nsChannelProxy(aChannel), mCacheEntry(aCacheEntry), mLoadGroup(aLoadGroup), mLoadAttributes(0)
{
    NS_ASSERTION(aCacheEntry->mChannelCount < 0xFF, "Overflowed channel counter");
    mCacheEntry->mChannelCount++;
    NS_INIT_REFCNT();
}
// Balance the channel count taken in the constructor.
nsCacheEntryChannel::~nsCacheEntryChannel()
{
    mCacheEntry->mChannelCount--;
}
NS_IMPL_ISUPPORTS3(nsCacheEntryChannel, nsISupports, nsIChannel, nsIRequest)
// A proxy for nsIOutputStream
// Wraps the raw cache output stream so the entry's logical length and
// download-time statistics stay current, and so the overall cache size is
// re-limited after each successful write.
class CacheOutputStream : public nsIOutputStream {
public:
    CacheOutputStream(nsIOutputStream *aOutputStream, nsCachedNetData *aCacheEntry):
        mOutputStream(aOutputStream), mCacheEntry(aCacheEntry), mStartTime(PR_Now())
        { NS_INIT_REFCNT(); }
    virtual ~CacheOutputStream() {}
    NS_DECL_ISUPPORTS
    // Closing finalizes download-time stats and clears the update flag.
    NS_IMETHOD Close() {
        mCacheEntry->NoteDownloadTime(mStartTime, PR_Now());
        mCacheEntry->ClearFlag(nsCachedNetData::UPDATE_IN_PROGRESS);
        return mOutputStream->Close();
    }
    NS_IMETHOD Flush() { return mOutputStream->Flush(); }
    NS_IMETHOD
    Write(const char *aBuf, PRUint32 aCount, PRUint32 *aActualBytes) {
        nsresult rv;
        *aActualBytes = 0;
        rv = mOutputStream->Write(aBuf, aCount, aActualBytes);
        // Count whatever was actually written, even on a partial failure.
        mCacheEntry->mLogicalLength += *aActualBytes;
        if (NS_FAILED(rv)) return rv;
        // The write may have pushed the cache past capacity; evict if so.
        nsCacheManager::LimitCacheSize();
        return rv;
    }
protected:
    nsCOMPtr<nsIOutputStream> mOutputStream; // underlying cache-module stream
    nsCOMPtr<nsCachedNetData> mCacheEntry;   // entry being filled
    // Time at which stream was opened
    PRTime mStartTime;
};
NS_IMPL_ISUPPORTS(CacheOutputStream, NS_GET_IID(nsIOutputStream))
// Open an output stream on the underlying channel, wrapped in a
// CacheOutputStream so that entry length, download-time statistics and the
// global cache size limit are maintained as data arrives.
NS_IMETHODIMP
nsCacheEntryChannel::OpenOutputStream(PRUint32 aStartPosition, nsIOutputStream* *aOutputStream)
{
    nsCOMPtr<nsIOutputStream> rawStream;
    nsresult rv = mChannel->OpenOutputStream(aStartPosition, getter_AddRefs(rawStream));
    if (NS_FAILED(rv)) return rv;
    // Record that the entry is being (re)written starting at aStartPosition.
    mCacheEntry->NoteUpdate();
    mCacheEntry->NoteAccess();
    mCacheEntry->mLogicalLength = aStartPosition;
    CacheOutputStream* wrapper = new CacheOutputStream(rawStream, mCacheEntry);
    *aOutputStream = wrapper;
    if (!wrapper)
        return NS_ERROR_OUT_OF_MEMORY;
    NS_ADDREF(*aOutputStream);
    return NS_OK;
}
// Forward to the underlying channel, first noting the access so the
// replacement policy sees this entry as recently used.
NS_IMETHODIMP
nsCacheEntryChannel::OpenInputStream(PRUint32 aStartPosition, PRInt32 aReadCount,
                                     nsIInputStream* *aInputStream)
{
    mCacheEntry->NoteAccess();
    nsresult status = mChannel->OpenInputStream(aStartPosition, aReadCount, aInputStream);
    return status;
}
// Interposes on the listener handed to AsyncRead so that load-group
// membership stays correct (AddChannel on start, RemoveChannel on stop) and
// so callbacks report the cache manager's channel rather than the raw
// cache-module channel that actually produced the data.
class CacheManagerStreamListener: public nsIStreamListener {
public:
    CacheManagerStreamListener(nsIStreamListener *aListener,
                               nsILoadGroup *aLoadGroup, nsIChannel *aChannel):
        mListener(aListener), mLoadGroup(aLoadGroup), mChannel(aChannel)
        { NS_INIT_REFCNT(); }
    virtual ~CacheManagerStreamListener() {}
private:
    // NOTE(review): the nsISupports/listener methods are declared private;
    // they are only ever invoked virtually through base-interface pointers,
    // which C++ permits, but `public:` would be conventional -- confirm.
    NS_DECL_ISUPPORTS
    // Forward data, substituting our channel for the reporting channel.
    NS_IMETHOD
    OnDataAvailable(nsIChannel *channel, nsISupports *aContext,
                    nsIInputStream *inStr, PRUint32 sourceOffset, PRUint32 count) {
        return mListener->OnDataAvailable(mChannel, aContext, inStr, sourceOffset, count);
    }
    NS_IMETHOD
    OnStartRequest(nsIChannel *channel, nsISupports *aContext) {
        if (mLoadGroup)
            mLoadGroup->AddChannel(mChannel, aContext);
        return mListener->OnStartRequest(mChannel, aContext);
    }
    NS_IMETHOD
    OnStopRequest(nsIChannel *channel, nsISupports *aContext,
                  nsresult status, const PRUnichar *errorMsg) {
        nsresult rv;
        rv = mListener->OnStopRequest(mChannel, aContext, status, errorMsg);
        if (mLoadGroup)
            mLoadGroup->RemoveChannel(mChannel, aContext, status, errorMsg);
        return rv;
    }
private:
    nsCOMPtr<nsIStreamListener> mListener; // the consumer's listener
    nsCOMPtr<nsILoadGroup> mLoadGroup;     // may be null
    nsCOMPtr<nsIChannel> mChannel;         // channel reported to the consumer
};
NS_IMPL_ISUPPORTS2(CacheManagerStreamListener, nsIStreamListener, nsIStreamObserver)
// Start an async read of the cached data.  The consumer's listener is
// (optionally) wrapped by the load group's listener factory, then by a
// CacheManagerStreamListener so callbacks reference the cache manager's
// channel and keep load-group bookkeeping straight.
NS_IMETHODIMP
nsCacheEntryChannel::AsyncRead(PRUint32 aStartPosition, PRInt32 aReadCount,
                               nsISupports *aContext, nsIStreamListener *aListener)
{
    nsresult rv;
    mCacheEntry->NoteAccess();
    nsCOMPtr<nsIStreamListener> headListener;
    if (mLoadGroup) {
        mLoadGroup->GetDefaultLoadAttributes(&mLoadAttributes);
        // Create a load group "proxy" listener...
        nsCOMPtr<nsILoadGroupListenerFactory> factory;
        rv = mLoadGroup->GetGroupListenerFactory(getter_AddRefs(factory));
        if (NS_SUCCEEDED(rv) && factory) {
            rv = factory->CreateLoadGroupListener(aListener,
                                                  getter_AddRefs(headListener));
            if (NS_FAILED(rv)) return rv;
        }
        // NOTE(review): when the load group has no listener factory,
        // headListener stays null here -- confirm upstream guarantees a
        // factory, or the wrapper below will forward to a null listener.
    } else {
        headListener = aListener;
    }
    // Report mProxyChannel to consumers when one exists, otherwise `this`.
    CacheManagerStreamListener* cacheManagerStreamListener;
    nsIChannel *channelForListener;
    channelForListener = mProxyChannel ? mProxyChannel : this;
    cacheManagerStreamListener =
        new CacheManagerStreamListener(headListener, mLoadGroup, channelForListener);
    if (!cacheManagerStreamListener) return NS_ERROR_OUT_OF_MEMORY;
    NS_ADDREF(cacheManagerStreamListener);
    rv = mChannel->AsyncRead(aStartPosition, aReadCount, aContext,
                             cacheManagerStreamListener);
    NS_RELEASE(cacheManagerStreamListener);
    return rv;
}
// No async writes allowed to the cache yet
NS_IMETHODIMP
nsCacheEntryChannel::AsyncWrite(nsIInputStream *aFromStream, PRUint32 aStartPosition,
                                PRInt32 aWriteCount, nsISupports *aContext,
                                nsIStreamObserver *aObserver)
{
    return NS_ERROR_NOT_IMPLEMENTED;
}
// Return the channel's load group (may be null), AddRef'd per the XPCOM
// getter convention.
// BUG FIX: the reference was previously handed out without an AddRef, so a
// caller that released it would over-release the load group.
NS_IMETHODIMP
nsCacheEntryChannel::GetLoadGroup(nsILoadGroup* *aLoadGroup)
{
    *aLoadGroup = mLoadGroup;
    NS_IF_ADDREF(*aLoadGroup);
    return NS_OK;
}
// Load attributes are cached locally; AsyncRead may refresh them from the
// load group's defaults.
NS_IMETHODIMP
nsCacheEntryChannel::GetLoadAttributes(nsLoadFlags *aLoadAttributes)
{
    *aLoadAttributes = mLoadAttributes;
    return NS_OK;
}
NS_IMETHODIMP
nsCacheEntryChannel::SetLoadAttributes(nsLoadFlags aLoadAttributes)
{
    mLoadAttributes = aLoadAttributes;
    return NS_OK;
}
static NS_DEFINE_CID(kIOServiceCID, NS_IOSERVICE_CID);
// Build an nsIURI from the cache entry's recorded URI spec.  The spec string
// is owned by this function and freed on every path.
NS_IMETHODIMP
nsCacheEntryChannel::GetURI(nsIURI * *aURI)
{
    char* spec;
    nsresult rv;
    rv = mCacheEntry->GetUriSpec(&spec);
    if (NS_FAILED(rv)) return rv;
    NS_WITH_SERVICE(nsIIOService, serv, kIOServiceCID, &rv);
    if (NS_FAILED(rv)) {
        // BUG FIX: the spec string leaked on this early-exit path.
        nsAllocator::Free(spec);
        return rv;
    }
    rv = serv->NewURI(spec, 0, aURI);
    nsAllocator::Free(spec);
    return rv;
}
// nsIChannel::GetOriginalURI -- not implemented for cache channels.
NS_IMETHODIMP
nsCacheEntryChannel::GetOriginalURI(nsIURI * *aURI)
{
    // FIXME - should return original URI passed into NewChannel() ?
    return NS_ERROR_NOT_IMPLEMENTED;
}

View File

@@ -0,0 +1,82 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#ifndef _nsCacheEntryChannel_h_
#define _nsCacheEntryChannel_h_
#include "nsCOMPtr.h"
#include "nsIChannel.h"
#include "nsCachedNetData.h"
#include "nsILoadGroup.h"
class nsIStreamListener;
// A proxy for an nsIChannel, useful when only a few nsIChannel
// methods must be overridden
class nsChannelProxy : public nsIChannel {
public:
    // Forward every nsIChannel/nsIRequest method to the wrapped channel by
    // default; subclasses override selectively.
    NS_FORWARD_NSICHANNEL(mChannel->)
    NS_FORWARD_NSIREQUEST(mChannel->)
protected:
    nsChannelProxy(nsIChannel* aChannel):mChannel(aChannel) {};
    virtual ~nsChannelProxy() {};
    nsCOMPtr<nsIChannel> mChannel; // the wrapped (real) channel
};
// Override several nsIChannel methods so that they interact with the cache manager
class nsCacheEntryChannel : public nsChannelProxy {
public:
    NS_DECL_ISUPPORTS
    NS_IMETHOD OpenOutputStream(PRUint32 aStartPosition, nsIOutputStream* *aOutputStream);
    NS_IMETHOD OpenInputStream(PRUint32 aStartPosition, PRInt32 aReadCount,
                               nsIInputStream* *aInputStream);
    NS_IMETHOD AsyncRead(PRUint32 aStartPosition, PRInt32 aReadCount,
                         nsISupports *aContext, nsIStreamListener *aListener);
    NS_IMETHOD AsyncWrite(nsIInputStream *aFromStream, PRUint32 aStartPosition,
                          PRInt32 aWriteCount, nsISupports *aContext,
                          nsIStreamObserver *aObserver);
    NS_IMETHOD GetLoadAttributes(nsLoadFlags *aLoadAttributes);
    NS_IMETHOD SetLoadAttributes(nsLoadFlags aLoadAttributes);
    NS_IMETHOD GetLoadGroup(nsILoadGroup* *aLoadGroup);
    NS_IMETHOD GetURI(nsIURI * *aURI);
    NS_IMETHOD GetOriginalURI(nsIURI * *aURI);
protected:
    // Only nsCachedNetData creates these channels (see friend below).
    nsCacheEntryChannel(nsCachedNetData* aCacheEntry, nsIChannel* aChannel, nsILoadGroup* aLoadGroup);
    virtual ~nsCacheEntryChannel();
    friend class nsCachedNetData;
private:
    nsCOMPtr<nsCachedNetData> mCacheEntry; // entry whose data this channel serves
    nsCOMPtr<nsILoadGroup> mLoadGroup;     // may be null
    nsCOMPtr<nsIChannel> mProxyChannel;    // channel reported to listeners, if set
    nsLoadFlags mLoadAttributes;
};
#endif // _nsCacheEntryChannel_h_

View File

@@ -0,0 +1,496 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#include "nsINetDataCache.h"
#include "nsCacheManager.h"
#include "nsCachedNetData.h"
#include "nsReplacementPolicy.h"
#include "nsString.h"
#include "nsIURI.h"
#include "nsHashtable.h"
#include "nsIComponentManager.h"
#include "nsINetDataDiskCache.h"
// Limit the number of entries in the cache to conserve memory space
// in the nsReplacementPolicy code
#define MAX_MEM_CACHE_ENTRIES 800
#define MAX_DISK_CACHE_ENTRIES 3200
// Default cache capacities, overridable via Set{Mem,Disk}CacheCapacity().
// NOTE: these are in KB, not MB -- the mMemCacheCapacity and
// mDiskCacheCapacity members are documented as KB in nsCacheManager.h.
#define DEFAULT_MEMORY_CACHE_CAPACITY 2000
#define DEFAULT_DISK_CACHE_CAPACITY 10000
// Eviction hysteresis: once occupancy exceeds the high-water mark,
// entries are evicted until occupancy drops to the low-water mark.
#define CACHE_HIGH_WATER_MARK(capacity) ((PRUint32)(0.98 * (capacity)))
#define CACHE_LOW_WATER_MARK(capacity) ((PRUint32)(0.97 * (capacity)))
nsCacheManager* gCacheManager = 0;
NS_IMPL_ISUPPORTS(nsCacheManager, NS_GET_IID(nsINetDataCacheManager))
// Construct the cache manager singleton and record it in gCacheManager.
// Capacities start at their compiled-in defaults; the active-record
// hash table and replacement policies are created later, in Init().
nsCacheManager::nsCacheManager()
    : mActiveCacheRecords(0),
      mDiskCacheCapacity(DEFAULT_DISK_CACHE_CAPACITY),
      mMemCacheCapacity(DEFAULT_MEMORY_CACHE_CAPACITY)
{
    NS_INIT_REFCNT();

    // Exactly one cache manager instance is expected per process
    NS_ASSERTION(!gCacheManager, "Multiple cache managers created");
    gCacheManager = this;
}
// Tear down the singleton: clear the global pointer and free the
// structures allocated in Init().  The nsCOMPtr cache-module members
// release their references automatically.
nsCacheManager::~nsCacheManager()
{
    gCacheManager = 0;
    delete mActiveCacheRecords;
    delete mMemSpaceManager;
    delete mDiskSpaceManager;
}
/**
 * Second-stage construction for the cache manager.
 *
 * Creates the per-module cache components (memory cache required; flat
 * and file caches optional), links them into the search chain, and
 * creates the replacement policies that manage memory-cache and
 * disk-cache space respectively.
 *
 * @return NS_ERROR_OUT_OF_MEMORY on allocation failure, or the failure
 *         code from component creation / policy initialization.
 */
nsresult
nsCacheManager::Init()
{
    nsresult rv;

    mActiveCacheRecords = new nsHashtable(64);
    if (!mActiveCacheRecords)
        return NS_ERROR_OUT_OF_MEMORY;

    // Instantiate the memory cache component (required)
    rv = nsComponentManager::CreateInstance(NS_NETWORK_MEMORY_CACHE_PROGID,
                                            nsnull,
                                            NS_GET_IID(nsINetDataCache),
                                            getter_AddRefs(mMemCache));
    if (NS_FAILED(rv))
        return rv;

    rv = nsComponentManager::CreateInstance(NS_NETWORK_FLAT_CACHE_PROGID,
                                            nsnull,
                                            NS_GET_IID(nsINetDataCache),
                                            getter_AddRefs(mFlatCache));
    if (NS_FAILED(rv)) {
        // For now, we don't require a flat cache module to be present
        if (rv != NS_ERROR_FACTORY_NOT_REGISTERED)
            return rv;
    }

#ifdef FILE_CACHE_IS_READY
    // Instantiate the file cache component
    rv = nsComponentManager::CreateInstance(NS_NETWORK_FILE_CACHE_PROGID,
                                            nsnull,
                                            NS_GET_IID(nsINetDataCache),
                                            getter_AddRefs(mFileCache));
    if (NS_FAILED(rv)) {
        NS_WARNING("No disk cache present");
    }
#endif

    // Set up linked list of caches in search order: memory first, then
    // flat cache (if any), then file cache (if any).
    // NOTE(review): when FILE_CACHE_IS_READY is not defined, mFileCache
    // is null and the chain is terminated by passing a null cache to
    // SetNextCache() -- assumes that is a legal way to end the chain;
    // confirm against the nsINetDataCache contract.
    mCacheSearchChain = mMemCache;
    if (mFlatCache) {
        mMemCache->SetNextCache(mFlatCache);
        mFlatCache->SetNextCache(mFileCache);
    } else {
        mMemCache->SetNextCache(mFileCache);
    }

    // TODO - Load any extension caches here

    // Initialize replacement policy for memory cache module
    mMemSpaceManager = new nsReplacementPolicy;
    if (!mMemSpaceManager)
        return NS_ERROR_OUT_OF_MEMORY;
    rv = mMemSpaceManager->Init(MAX_MEM_CACHE_ENTRIES);
    if (NS_FAILED(rv)) return rv;
    // NOTE(review): the result of this AddCache() call is assigned to
    // rv but never checked before rv is reassigned below.
    rv = mMemSpaceManager->AddCache(mMemCache);

    // Initialize replacement policy for disk cache modules (file
    // cache and flat cache)
    mDiskSpaceManager = new nsReplacementPolicy;
    if (!mDiskSpaceManager)
        return NS_ERROR_OUT_OF_MEMORY;
    rv = mDiskSpaceManager->Init(MAX_DISK_CACHE_ENTRIES);
    if (NS_FAILED(rv)) return rv;
    if (mFileCache) {
        rv = mDiskSpaceManager->AddCache(mFileCache);
        if (NS_FAILED(rv)) return rv;
    }
    if (mFlatCache) {
        rv = mDiskSpaceManager->AddCache(mFlatCache);
        if (NS_FAILED(rv)) return rv;
    }
    return NS_OK;
}
/**
 * Return the cache entry for (URI spec + secondary key), creating a new
 * record in the selected cache module if none exists.
 *
 * Module selection (from aFlags):
 *   CACHE_AS_FILE              -> stream-as-file cache
 *   BYPASS_PERSISTENT_CACHE or
 *   zero disk capacity         -> memory cache
 *   otherwise                  -> flat cache if present, else file cache
 *
 * @param aUriSpec            URI spec; primary part of the cache key
 * @param aSecondaryKey       optional; appended after a NUL separator
 * @param aSecondaryKeyLength length of aSecondaryKey in bytes
 * @param aFlags              module-selection flags, see above
 * @param aResult             [out] AddRef'd cache entry
 */
NS_IMETHODIMP
nsCacheManager::GetCachedNetData(const char *aUriSpec, const char *aSecondaryKey,
                                 PRUint32 aSecondaryKeyLength,
                                 PRUint32 aFlags, nsICachedNetData* *aResult)
{
    nsCachedNetData *cachedData;
    nsresult rv;
    nsINetDataCache *cache;
    nsReplacementPolicy *spaceManager;

    if (aFlags & CACHE_AS_FILE) {
        cache = mFileCache;
        spaceManager = mDiskSpaceManager;

        // Ensure that cache is initialized
        // NOTE(review): (PRUint32)-1 is treated as an "uninitialized"
        // sentinel for the disk cache capacity -- confirm who sets it.
        if (mDiskCacheCapacity == (PRUint32)-1)
            return NS_ERROR_NOT_AVAILABLE;
    } else if ((aFlags & BYPASS_PERSISTENT_CACHE) || !mDiskCacheCapacity) {
        cache = mMemCache;
        spaceManager = mMemSpaceManager;
    } else {
        cache = mFlatCache ? mFlatCache : mFileCache;
        spaceManager = mDiskSpaceManager;
    }

    // Construct the cache key by appending the secondary key to the URI spec
    nsCAutoString cacheKey(aUriSpec);

    // Insert NUL at end of URI spec, separating it from the secondary key
    cacheKey += '\0';
    if (aSecondaryKey)
        cacheKey.Append(aSecondaryKey, aSecondaryKeyLength);

    // See if there is already an active (externally referenced) entry
    nsStringKey key(cacheKey);
    cachedData = (nsCachedNetData*)mActiveCacheRecords->Get(&key);
    if (cachedData) {
        NS_ASSERTION(cache == cachedData->mCache,
                     "Cannot yet handle simultaneously active requests for the "
                     "same URL using different caches");
        NS_ADDREF(cachedData);
    } else {
        // There is no existing instance of nsCachedNetData for this URL.
        // Make one from the corresponding record in the cache module.
        rv = spaceManager->GetCachedNetData(cacheKey.GetBuffer(), cacheKey.Length(),
                                            cache, &cachedData);
        if (NS_FAILED(rv)) return rv;
        mActiveCacheRecords->Put(&key, cachedData);
    }

    *aResult = cachedData;
    return NS_OK;
}
/**
 * Remove this cache entry from the list of active ones.  Static helper
 * operating on the gCacheManager singleton; called when an entry no
 * longer has external references.
 */
nsresult
nsCacheManager::NoteDormant(nsCachedNetData* aEntry)
{
    nsresult rv;
    PRUint32 keyLength;
    char* key;
    nsCOMPtr<nsINetDataCacheRecord> record;
    nsCachedNetData* deletedEntry;

    rv = aEntry->GetRecord(getter_AddRefs(record));
    if (NS_FAILED(rv)) return rv;

    // NOTE(review): if GetKey() hands back an allocated buffer, |key| is
    // leaked here -- confirm the ownership convention and free if needed.
    rv = record->GetKey(&keyLength, &key);
    if (NS_FAILED(rv)) return rv;

    nsStringKey hashTableKey(nsCString(key, keyLength));
    deletedEntry = (nsCachedNetData*)gCacheManager->mActiveCacheRecords->Remove(&hashTableKey);
    NS_ASSERTION(deletedEntry == aEntry, "Hash table inconsistency");
    return NS_OK;
}
/**
 * Test whether an entry for (URI spec + secondary key) exists, either
 * as an active entry or dormant in the selected cache module.  Module
 * selection mirrors GetCachedNetData(), with the extra fallback to the
 * memory cache when no persistent cache module exists at all.
 *
 * NOTE(review): unlike GetCachedNetData(), aSecondaryKey is appended
 * without a null check -- confirm callers never pass null here.
 */
NS_IMETHODIMP
nsCacheManager::Contains(const char *aUriSpec, const char *aSecondaryKey,
                         PRUint32 aSecondaryKeyLength,
                         PRUint32 aFlags, PRBool *aResult)
{
    nsINetDataCache *cache;
    nsReplacementPolicy *spaceManager;
    nsCachedNetData *cachedData;

    if (aFlags & CACHE_AS_FILE) {
        cache = mFileCache;
        spaceManager = mDiskSpaceManager;
    } else if ((aFlags & BYPASS_PERSISTENT_CACHE) ||
               (!mFileCache && !mFlatCache) || !mDiskCacheCapacity) {
        cache = mMemCache;
        spaceManager = mMemSpaceManager;
    } else {
        cache = mFlatCache ? mFlatCache : mFileCache;
        spaceManager = mDiskSpaceManager;
    }

    // Construct the cache key by appending the secondary key to the URI spec
    nsCAutoString cacheKey(aUriSpec);

    // Insert NUL between URI spec and secondary key
    cacheKey += '\0';
    cacheKey.Append(aSecondaryKey, aSecondaryKeyLength);

    // Locate the record using (URI + secondary key)
    nsStringKey key(cacheKey);
    cachedData = (nsCachedNetData*)mActiveCacheRecords->Get(&key);
    if (cachedData && (cache == cachedData->mCache)) {
        *aResult = PR_TRUE;
        return NS_OK;
    } else {
        // No active cache entry, see if there is a dormant one
        return cache->Contains(cacheKey.GetBuffer(), cacheKey.Length(), aResult);
    }
}
/**
 * Sum the number of entries across all cache modules on the search
 * chain.
 *
 * @param aNumEntries [out] total entry count over all known caches
 */
NS_IMETHODIMP
nsCacheManager::GetNumEntries(PRUint32 *aNumEntries)
{
    nsresult rv;
    nsCOMPtr<nsISimpleEnumerator> iterator;
    nsCOMPtr<nsISupports> cacheSupports;
    nsCOMPtr<nsINetDataCache> cache;
    PRUint32 totalEntries = 0;

    rv = NewCacheModuleIterator(getter_AddRefs(iterator));
    if (NS_FAILED(rv)) return rv;

    while (1) {
        PRBool notDone;
        rv = iterator->HasMoreElements(&notDone);
        if (NS_FAILED(rv)) return rv;
        if (!notDone)
            break;

        // Bug fix: GetNext()'s result was previously ignored, and a
        // failed QI would have left |cache| null and crashed below.
        rv = iterator->GetNext(getter_AddRefs(cacheSupports));
        if (NS_FAILED(rv)) return rv;
        cache = do_QueryInterface(cacheSupports);
        if (!cache)
            return NS_ERROR_UNEXPECTED;

        PRUint32 numEntries;
        rv = cache->GetNumEntries(&numEntries);
        if (NS_FAILED(rv)) return rv;
        totalEntries += numEntries;
    }
    *aNumEntries = totalEntries;
    return NS_OK;
}
// Iteration over individual cache entries (as opposed to cache modules,
// see NewCacheModuleIterator) is not yet supported.
NS_IMETHODIMP
nsCacheManager::NewCacheEntryIterator(nsISimpleEnumerator* *aResult)
{
    return NS_ERROR_NOT_IMPLEMENTED;
}
/**
 * Simple enumerator over the singly-linked chain of cache modules
 * (linked via nsINetDataCache::GetNextCache).  Holds a strong reference
 * to the current cache; enumeration ends when the chain's next link is
 * null.
 */
class CacheEnumerator : public nsISimpleEnumerator
{
public:
    CacheEnumerator(nsINetDataCache* aFirstCache):mCache(aFirstCache)
        { NS_INIT_REFCNT(); }
    virtual ~CacheEnumerator() {};

    NS_DECL_ISUPPORTS

    // More elements remain as long as the current cache is non-null.
    NS_IMETHODIMP
    HasMoreElements(PRBool* aMoreElements) {
        *aMoreElements = (mCache != 0);
        return NS_OK;
    }

    // Return the current cache (AddRef'd) and advance to the next one.
    // Fails if the enumeration is already exhausted.
    NS_IMETHODIMP
    GetNext(nsISupports* *aSupports) {
        *aSupports = mCache;
        if (!mCache)
            return NS_ERROR_FAILURE;
        NS_ADDREF(*aSupports);

        nsCOMPtr<nsINetDataCache> nextCache;
        nsresult rv = mCache->GetNextCache(getter_AddRefs(nextCache));
        mCache = nextCache;
        return rv;
    }

private:
    nsCOMPtr<nsINetDataCache> mCache;   // current position (strong ref)
};
NS_IMPL_ISUPPORTS(CacheEnumerator, NS_GET_IID(nsISimpleEnumerator))
/**
 * Create an enumerator over the chain of cache modules, starting at the
 * head of the search chain.  The enumerator is returned AddRef'd.
 */
NS_IMETHODIMP
nsCacheManager::NewCacheModuleIterator(nsISimpleEnumerator* *aResult)
{
    CacheEnumerator* enumerator = new CacheEnumerator(mCacheSearchChain);
    if (!enumerator)
        return NS_ERROR_OUT_OF_MEMORY;
    NS_ADDREF(enumerator);
    *aResult = enumerator;
    return NS_OK;
}
/**
 * Remove all entries from every writable cache module.  Modules flagged
 * READ_ONLY are skipped.  Clearing continues past individual module
 * failures; the most recent failure code (if any) is returned.
 */
NS_IMETHODIMP
nsCacheManager::RemoveAll(void)
{
    nsresult rv, result;
    nsCOMPtr<nsISimpleEnumerator> iterator;
    nsCOMPtr<nsINetDataCache> cache;
    nsCOMPtr<nsISupports> iSupports;

    result = NS_OK;
    rv = NewCacheModuleIterator(getter_AddRefs(iterator));
    if (NS_FAILED(rv)) return rv;

    while (1) {
        PRBool notDone;
        rv = iterator->HasMoreElements(&notDone);
        if (NS_FAILED(rv)) return rv;
        if (!notDone)
            break;

        // Bug fix: GetNext()'s result was previously ignored, and a
        // failed QI would have left |cache| null and crashed below.
        rv = iterator->GetNext(getter_AddRefs(iSupports));
        if (NS_FAILED(rv)) return rv;
        cache = do_QueryInterface(iSupports);
        if (!cache)
            return NS_ERROR_UNEXPECTED;

        PRUint32 cacheFlags;
        rv = cache->GetFlags(&cacheFlags);
        if (NS_FAILED(rv)) return rv;

        if ((cacheFlags & nsINetDataCache::READ_ONLY) == 0) {
            rv = cache->RemoveAll();
            if (NS_FAILED(rv))
                result = rv;
        }
    }
    return result;
}
/**
 * Evict memory-cache entries if occupancy has crossed the high-water
 * mark, bringing usage back down to the low-water mark.  Static helper
 * operating on the gCacheManager singleton.
 */
nsresult
nsCacheManager::LimitMemCacheSize()
{
    NS_ASSERTION(gCacheManager, "No cache manager");
    nsReplacementPolicy* policy = gCacheManager->mMemSpaceManager;

    PRUint32 inUse;
    nsresult rv = policy->GetStorageInUse(&inUse);
    if (NS_FAILED(rv)) return rv;

    PRUint32 capacity = gCacheManager->mMemCacheCapacity;
    if (inUse <= CACHE_HIGH_WATER_MARK(capacity))
        return NS_OK;
    return policy->Evict(CACHE_LOW_WATER_MARK(capacity));
}
/**
 * Evict disk-cache (flat + file cache) entries if occupancy has crossed
 * the high-water mark, bringing usage back down to the low-water mark.
 * Static helper operating on the gCacheManager singleton.
 */
nsresult
nsCacheManager::LimitDiskCacheSize()
{
    NS_ASSERTION(gCacheManager, "No cache manager");
    nsReplacementPolicy* policy = gCacheManager->mDiskSpaceManager;

    PRUint32 inUse;
    nsresult rv = policy->GetStorageInUse(&inUse);
    if (NS_FAILED(rv)) return rv;

    PRUint32 capacity = gCacheManager->mDiskCacheCapacity;
    if (inUse <= CACHE_HIGH_WATER_MARK(capacity))
        return NS_OK;
    return policy->Evict(CACHE_LOW_WATER_MARK(capacity));
}
// Enforce both the disk and memory cache capacity limits, stopping at
// the first failure.
nsresult
nsCacheManager::LimitCacheSize()
{
    nsresult rv = LimitDiskCacheSize();
    if (NS_FAILED(rv)) return rv;
    return LimitMemCacheSize();
}
/**
 * Set the memory cache capacity (in KB) and immediately enforce the new
 * limit.  Eviction failures are deliberately not reported (best effort).
 */
NS_IMETHODIMP
nsCacheManager::SetMemCacheCapacity(PRUint32 aCapacity)
{
    mMemCacheCapacity = aCapacity;
    LimitCacheSize();
    return NS_OK;
}
// Return the memory cache capacity, in KB.
NS_IMETHODIMP
nsCacheManager::GetMemCacheCapacity(PRUint32* aCapacity)
{
    NS_ENSURE_ARG_POINTER(aCapacity);
    *aCapacity = mMemCacheCapacity;
    return NS_OK;
}
/**
 * Set the combined flat/file cache capacity (in KB) and immediately
 * enforce the new limit.  Eviction failures are deliberately not
 * reported (best effort).
 */
NS_IMETHODIMP
nsCacheManager::SetDiskCacheCapacity(PRUint32 aCapacity)
{
    mDiskCacheCapacity = aCapacity;
    LimitCacheSize();
    return NS_OK;
}
// Return the combined flat/file cache capacity, in KB.
NS_IMETHODIMP
nsCacheManager::GetDiskCacheCapacity(PRUint32* aCapacity)
{
    NS_ENSURE_ARG_POINTER(aCapacity);
    *aCapacity = mDiskCacheCapacity;
    return NS_OK;
}
/**
 * Forward the disk-cache folder setting to the stream-as-file cache.
 *
 * @param aFolder the folder to store cached files in
 * @return NS_ERROR_NOT_AVAILABLE if no file cache module exists,
 *         NS_ERROR_NO_INTERFACE if the module lacks nsINetDataDiskCache
 */
NS_IMETHODIMP
nsCacheManager::SetDiskCacheFolder(nsIFileSpec* aFolder)
{
    NS_ENSURE_ARG(aFolder);
    if (!mFileCache)
        return NS_ERROR_NOT_AVAILABLE;

    // Bug fix: a failed QI previously left |fileCache| null and the
    // call below would have crashed.
    nsCOMPtr<nsINetDataDiskCache> fileCache = do_QueryInterface(mFileCache);
    if (!fileCache)
        return NS_ERROR_NO_INTERFACE;
    return fileCache->SetDiskCacheFolder(aFolder);
}
/**
 * Retrieve the disk-cache folder from the stream-as-file cache.
 *
 * @param aFolder [out] the folder cached files are stored in
 * @return NS_ERROR_NOT_AVAILABLE if no file cache module exists,
 *         NS_ERROR_NO_INTERFACE if the module lacks nsINetDataDiskCache
 */
NS_IMETHODIMP
nsCacheManager::GetDiskCacheFolder(nsIFileSpec* *aFolder)
{
    NS_ENSURE_ARG(aFolder);
    if (!mFileCache)
        return NS_ERROR_NOT_AVAILABLE;

    // Bug fix: a failed QI previously left |fileCache| null and the
    // call below would have crashed.
    nsCOMPtr<nsINetDataDiskCache> fileCache = do_QueryInterface(mFileCache);
    if (!fileCache)
        return NS_ERROR_NO_INTERFACE;
    return fileCache->GetDiskCacheFolder(aFolder);
}

View File

@@ -0,0 +1,99 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#ifndef _nsCacheManager_h_
#define _nsCacheManager_h_
// 2030f0b0-9567-11d3-90d3-0040056a906e
#define NS_CACHE_MANAGER_CID \
{ \
0x2030f0b0, \
0x9567, \
0x11d3, \
{0x90, 0xd3, 0x00, 0x40, 0x05, 0x6a, 0x90, 0x6e} \
}
#include "nsINetDataCacheManager.h"
#include "nsCOMPtr.h"
class nsHashtable;
class nsReplacementPolicy;
class nsCachedNetData;
/**
 * Aggregates several cache modules (memory cache, flat-file database
 * cache, and stream-as-file cache) behind the single
 * nsINetDataCacheManager interface, applying replacement policies to
 * keep each within its configured capacity.
 */
class nsCacheManager : public nsINetDataCacheManager {
public:
    nsCacheManager();
    virtual ~nsCacheManager();

    // Second-stage construction: instantiates the cache modules and the
    // replacement policies.  Must be called (and succeed) before use.
    NS_METHOD Init();

    // nsISupports methods
    NS_DECL_ISUPPORTS

    // nsINetDataCacheManager methods
    NS_DECL_NSINETDATACACHEMANAGER

private:
    // Mapping from cache key to nsCachedNetData, but only for those cache
    // entries with external references, i.e. those referred to outside the
    // cache manager
    nsHashtable* mActiveCacheRecords;

    // Memory cache
    nsCOMPtr<nsINetDataCache> mMemCache;

    // Flat-file database cache; All content aggregated into single disk file
    nsCOMPtr<nsINetDataCache> mFlatCache;

    // stream-as-file cache
    nsCOMPtr<nsINetDataCache> mFileCache;

    // Unified replacement policy for flat-cache and file-cache
    nsReplacementPolicy* mDiskSpaceManager;

    // Replacement policy for memory cache
    nsReplacementPolicy* mMemSpaceManager;

    // List of caches in search order (head is the memory cache)
    nsINetDataCache* mCacheSearchChain;

    // Combined file/flat cache capacity, in KB
    PRUint32 mDiskCacheCapacity;

    // Memory cache capacity, in KB
    PRUint32 mMemCacheCapacity;

protected:
    // Static helpers operating on the gCacheManager singleton
    static nsresult NoteDormant(nsCachedNetData* aEntry);
    static nsresult LimitCacheSize();
    static nsresult LimitMemCacheSize();
    static nsresult LimitDiskCacheSize();

    friend class nsCachedNetData;
    friend class CacheOutputStream;
};
#endif // _nsCacheManager_h_

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,242 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#ifndef _nsCachedNetData_h_
#define _nsCachedNetData_h_
#include "nsICachedNetData.h"
#include "nsCOMPtr.h"
#include "nsINetDataCacheRecord.h"
class nsINetDataCache;
class nsIStreamAsFileObserver;
class nsIStreamAsFile;
class nsIArena;
class StreamAsFileObserverClosure;
class CacheMetaData;
// Number of recent access times recorded
#define MAX_K 3
/**
 * In-memory state for a single cache entry, layered over a raw
 * nsINetDataCacheRecord.  Carries the status flags and per-entry usage
 * statistics (access times, download rate, estimated profit) consumed
 * by nsReplacementPolicy when ranking entries for eviction.  There are
 * a lot of these data structures resident in memory, so be careful
 * about adding members unnecessarily.
 */
class nsCachedNetData : public nsICachedNetData {
public:
    NS_DECL_ISUPPORTS

    // nsICachedNetData methods
    NS_DECL_NSICACHEDNETDATA

    // Bind this entry to its underlying database record and parent cache.
    NS_METHOD Init(nsINetDataCacheRecord *aRecord, nsINetDataCache *aCache);

protected:
    // Bits for mFlags, below
    typedef enum {
        DIRTY             = 1 << 0, // Cache entry data needs to be flushed to database

        // ==== Flags that can be set by the protocol handler ====
        ALLOW_PARTIAL      = 1 << 1, // Protocol handler supports partial fetching
        UPDATE_IN_PROGRESS = 1 << 2, // Protocol handler now modifying cache data

        // ==== Cache-entry state flags. At most one of these flags can be set ====
        TRUNCATED_CONTENT  = 1 << 4, // Entry contains valid content, but it has
                                     // been truncated by cache manager

        // A previously-used cache entry, which has been purged of all cached
        // content and protocol-private data. This cache entry can be refilled
        // with new content or it may be retained in this vestigial state
        // because the usage statistics it contains will be used by the
        // replacement policy if the same URI is ever cached again.
        VESTIGIAL          = 1 << 5,

        // ==== Memory usage status bits. At most one of these flags can be set ====
        RECYCLED           = 1 << 8, // Previously associated database record has
                                     // been deleted; This cache entry is available
                                     // for recycling.
        DORMANT            = 1 << 9, // No references to this cache entry, except by
                                     // the cache manager itself

        // ==== Setter bits ====
        LAST_MODIFIED_KNOWN = 1 <<12, // Protocol handler called SetLastModifiedTime()
        EXPIRATION_KNOWN    = 1 <<13, // Protocol handler called SetExpirationTime()
        STALE_TIME_KNOWN    = 1 <<14, // Protocol handler called SetStaleTime()

        // ==== Useful flag combinations ====
        // Cache entry not eligible for eviction
        UNEVICTABLE = VESTIGIAL | RECYCLED | UPDATE_IN_PROGRESS,

        // State flags that are in-memory only, i.e. not persistent
        TRANSIENT_FLAGS = DIRTY | RECYCLED | DORMANT
    } Flag;

    // Test a single flag bit.
    PRBool GetFlag(Flag aFlag) { return (mFlags & aFlag) != 0; }
    nsresult GetFlag(PRBool *aResult, Flag aFlag) { *aResult = GetFlag(aFlag); return NS_OK; }

    // Set a boolean flag for the cache entry
    nsresult SetFlag(PRBool aValue, Flag aFlag);
    nsresult SetFlag(Flag aFlag) { return SetFlag(PR_TRUE, aFlag); }
    nsresult ClearFlag(Flag aFlag) { return SetFlag(PR_FALSE, aFlag); }

    // Recompute mProfit (the eviction-ranking heuristic) as of the
    // given time, in seconds since the epoch.
    void ComputeProfit(PRUint32 aCurrentTime);

    // Sorting predicate used by the replacement policy's ranking sort.
    static int Compare(const void *a, const void *b, void *unused);

    void NoteAccess();
    void NoteUpdate();

    // Get underlying raw cache database record.
    nsresult GetRecord(nsINetDataCacheRecord* *aRecord);
    nsresult GetRecordID(PRInt32 *aRecordID);

    nsresult Evict(PRUint32 aTruncatedContentLength);
    nsresult GetFileSpec(nsIFileSpec* *aFileSpec);

    // Record observed transfer timing for download-rate statistics.
    void NoteDownloadTime(PRTime start, PRTime end);

    // placement new for arena-allocation
    void *operator new (size_t aSize, nsIArena *aArena);

    friend class nsReplacementPolicy;
    friend class nsCacheManager;
    friend class StreamAsFile;
    friend class nsCacheEntryChannel;
    friend class CacheOutputStream;
    friend class InterceptStreamListener;

private:
    nsCachedNetData() {};
    virtual ~nsCachedNetData() {};

    // Initialize internal fields of this nsCachedNetData instance from the
    // underlying raw cache database record.
    nsresult Deserialize(bool aDeserializeFlags);

    // Notify stream-as-file observers about change in cache entry status
    nsresult Notify(PRUint32 aMessage, nsresult aError);

    // Add/Remove stream-as-file observers
    nsresult AddObserver(nsIStreamAsFile *aStreamAsFile, nsIStreamAsFileObserver* aObserver);
    nsresult RemoveObserver(nsIStreamAsFileObserver* aObserver);

    // Mark cache entry to indicate a write out to the cache database is required
    void SetDirty() { mFlags |= DIRTY; }

    nsresult Resurrect(nsINetDataCacheRecord *aRecord);
    nsresult CommitFlags();
    CacheMetaData* FindTaggedMetaData(const char* aTag, PRBool aCreate);

private:
    // List of nsIStreamAsFileObserver's that will receive notification events
    // when the cache manager or a client desires to delete/truncate a cache
    // entry file.
    StreamAsFileObserverClosure* mObservers;

    // Protocol-specific meta-data, opaque to the cache manager
    CacheMetaData *mMetaData;

    // Next in chain for a single bucket in the replacement policy hash table
    // that maps from record ID to nsCachedNetData
    nsCachedNetData* mNext;

    // See flag bits, above
    // NOTE: 16 bit member is combined with members below for
    // struct packing efficiency. Do not change order of members!
    PRUint16 mFlags;

protected:
    // Number of nsCacheEntryChannels referring to this record
    PRUint8 mChannelCount;

    // Below members are statistics kept per cache-entry, used to decide how
    // profitable it will be to evict a record from the cache relative to other
    // existing records. Note: times are measured in *seconds* since the
    // 1/1/70 epoch, same as a unix time_t.

    // Number of accesses for this cache record
    // NOTE: 8 bit member is combined with members above for
    // struct packing efficiency. Do not change order of members!
    PRUint8 mNumAccesses;

    // A reference to the underlying, raw cache database record, either as a
    // pointer to an in-memory object or as a database record identifier
    union {
        nsINetDataCacheRecord* mRecord;

        // Database record ID of associated cache record. See
        // nsINetDataCache::GetRecordByID().
        PRInt32 mRecordID;
    };

    // Weak link to parent cache
    nsINetDataCache* mCache;

    // Length of stored content, which may be less than storage consumed if
    // compression is used
    PRUint32 mLogicalLength;

    // Most recent cache entry access times, used to compute access frequency
    PRUint32 mAccessTime[MAX_K];

    // We use modification time of the original document for replacement policy
    // computations, i.e. to compute a document's age, but if we don't know it,
    // we use the time that the document was last written to the cache.
    union {
        // Document modification time, if known.
        PRUint32 mLastModifiedTime;

        // Time of last cache update for this doc
        PRUint32 mLastUpdateTime;
    };

    union {
        // Time until which document is fresh, i.e. does not have to be validated
        // with server and, therefore, data in cache is guaranteed usable
        PRUint32 mExpirationTime;

        // Heuristic time at which cached document is likely to be out-of-date
        // with respect to canonical copy on server. Used for cache replacement
        // policy, not for validation.
        PRUint32 mStaleTime;
    };

    // Download time per byte, measure roughly in units of KB/s
    float mDownloadRate;

    // Heuristic estimate of cache entry future benefits, based on above values
    float mProfit;
};
#endif // _nsCachedNetData_h_

View File

@@ -0,0 +1,658 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#include "nsReplacementPolicy.h"
#include "nsCachedNetData.h"
#include "nsQuickSort.h"
#include "nsIAllocator.h"
#include "nsIEnumerator.h"
#include "prtime.h"
#include "prbit.h"
#include "nsCOMPtr.h"
#include <math.h>
// Constant used to estimate frequency of access to a document based on size
#define CACHE_CONST_B 1.35
// A cache whose space is managed by this replacement policy.
// CacheInfo nodes form a singly-linked list headed by
// nsReplacementPolicy::mCaches.
class nsReplacementPolicy::CacheInfo {
public:
    CacheInfo(nsINetDataCache* aCache):mCache(aCache),mNext(0) {}

    nsINetDataCache* mCache;   // managed cache module (raw pointer; not AddRef'd here)
    CacheInfo*       mNext;    // next cache in the list
};
// Construct an empty replacement policy.  Heavy-weight setup (the arena
// and the record-ID hash table) happens in Init().
// Bug fix: mMapRecordIdToEntry was previously left uninitialized, yet
// the destructor tests and frees it -- destroying an instance whose
// Init() was never called (or failed before the allocation) would free
// a garbage pointer.
nsReplacementPolicy::nsReplacementPolicy()
    : mRankedEntries(0), mCaches(0), mRecordsRemovedSinceLastRanking(0),
      mNumEntries(0), mCapacityRankedEntriesArray(0), mLastRankTime(0),
      mMapRecordIdToEntry(0) {}
// Free the ranked-entries array, the record-ID hash table, and the
// CacheInfo list.
// Bug fix: the CacheInfo nodes allocated by AddCache() were previously
// never freed, leaking one node per registered cache module.  (The
// nsINetDataCache pointers they hold are not owned here.)
nsReplacementPolicy::~nsReplacementPolicy()
{
    if (mRankedEntries)
        nsAllocator::Free(mRankedEntries);
    if (mMapRecordIdToEntry)
        nsAllocator::Free(mMapRecordIdToEntry);
    while (mCaches) {
        CacheInfo* next = mCaches->mNext;
        delete mCaches;
        mCaches = next;
    }
}
/**
 * Second-stage construction: allocate the arena used for cache-entry
 * objects and the record-ID -> entry hash table.
 *
 * @param aMaxCacheEntries upper bound on entries this policy will track
 */
nsresult
nsReplacementPolicy::Init(PRUint32 aMaxCacheEntries)
{
    nsresult rv;

    rv = NS_NewHeapArena(getter_AddRefs(mArena), sizeof(nsCachedNetData) * 32);
    if (NS_FAILED(rv)) return rv;

    mMaxEntries = aMaxCacheEntries;

    // NOTE(review): this computes ceil(log2(max)) >> 3, which for the
    // defaults (800 and 3200 entries) yields a hash array of length 1,
    // putting every record into a single bucket; HashRecordID() also
    // requires the length to be a power of two, which this expression
    // does not guarantee for all inputs.  Possibly intended to be
    // something like 1 << (PR_CeilingLog2(aMaxCacheEntries) - 3) --
    // confirm intent before changing.
    mHashArrayLength = PR_CeilingLog2(aMaxCacheEntries) >> 3;
    size_t numBytes = mHashArrayLength * sizeof(*mMapRecordIdToEntry);
    mMapRecordIdToEntry = (nsCachedNetData**)nsAllocator::Alloc(numBytes);
    if (!mMapRecordIdToEntry)
        return NS_ERROR_OUT_OF_MEMORY;
    nsCRT::zero(mMapRecordIdToEntry, numBytes);
    return NS_OK;
}
/**
 * Register a cache module with this replacement policy.  New caches are
 * pushed onto the front of the CacheInfo list.
 */
nsresult
nsReplacementPolicy::AddCache(nsINetDataCache *aCache)
{
    CacheInfo* info = new CacheInfo(aCache);
    if (!info)
        return NS_ERROR_OUT_OF_MEMORY;

    // Push onto the head of the singly-linked list
    info->mNext = mCaches;
    mCaches = info;
    return NS_OK;
}
// Map a record ID to a bucket index in mMapRecordIdToEntry by folding
// the high halfword into the low one.  The mask requires
// mHashArrayLength to be a power of two.
PRUint32
nsReplacementPolicy::HashRecordID(PRInt32 aRecordID)
{
    PRUint32 folded = (aRecordID >> 16) ^ aRecordID;
    return folded & (mHashArrayLength - 1);
}
/**
 * Look up the in-memory cache entry for the given (record ID, cache
 * module) pair, or return null if no such entry has been created yet.
 */
nsCachedNetData*
nsReplacementPolicy::FindCacheEntryByRecordID(PRInt32 aRecordID, nsINetDataCache *aCache)
{
    PRUint32 bucket = HashRecordID(aRecordID);

    // Walk the bucket's chain looking for a matching ID and cache
    for (nsCachedNetData* entry = mMapRecordIdToEntry[bucket];
         entry;
         entry = entry->mNext) {
        PRInt32 entryRecordID;
        if (NS_FAILED(entry->GetRecordID(&entryRecordID)))
            continue;
        if ((entryRecordID == aRecordID) && (entry->mCache == aCache))
            return entry;
    }
    return 0;
}
// Append a cache entry to the tail of its record-ID hash chain.
void
nsReplacementPolicy::AddCacheEntry(nsCachedNetData* aCacheEntry, PRInt32 aRecordID)
{
    PRUint32 bucket = HashRecordID(aRecordID);

    // Find the terminating null link of the chain in this bucket
    nsCachedNetData** link = &mMapRecordIdToEntry[bucket];
    while (*link)
        link = &(*link)->mNext;

    aCacheEntry->mNext = 0;
    *link = aCacheEntry;
}
/**
 * Unlink a cache entry from the record-ID hash table.  Returns
 * NS_ERROR_FAILURE if the entry is not found in its bucket, which
 * indicates a hash table inconsistency.
 */
nsresult
nsReplacementPolicy::DeleteCacheEntry(nsCachedNetData* aCacheEntry)
{
    PRInt32 recordID;
    nsresult rv = aCacheEntry->GetRecordID(&recordID);
    if (NS_FAILED(rv)) return rv;

    // Scan the bucket chain for the link that points at this entry
    nsCachedNetData** link = &mMapRecordIdToEntry[HashRecordID(recordID)];
    for (; *link; link = &(*link)->mNext) {
        if (*link == aCacheEntry) {
            *link = aCacheEntry->mNext;
            return NS_OK;
        }
    }

    NS_ASSERTION(0, "hash table inconsistency");
    return NS_ERROR_FAILURE;
}
/**
 * Populate the replacement policy with every record already present in
 * the given cache module (used on the first ranking so that records
 * persisted by earlier sessions are accounted for).
 *
 * Consistency fix: error checks now use NS_FAILED(rv), matching the
 * idiom used everywhere else in this file, instead of !NS_SUCCEEDED(rv)
 * (the two are equivalent by definition).
 */
nsresult
nsReplacementPolicy::AddAllRecordsInCache(nsINetDataCache *aCache)
{
    nsresult rv;
    nsCOMPtr<nsISimpleEnumerator> iterator;
    nsCOMPtr<nsISupports> iSupports;
    nsCOMPtr<nsINetDataCacheRecord> record;

    rv = aCache->NewCacheEntryIterator(getter_AddRefs(iterator));
    if (NS_FAILED(rv)) return rv;

    while (1) {
        PRBool notDone;
        rv = iterator->HasMoreElements(&notDone);
        if (NS_FAILED(rv)) return rv;
        if (!notDone)
            break;

        rv = iterator->GetNext(getter_AddRefs(iSupports));
        if (NS_FAILED(rv)) return rv;
        record = do_QueryInterface(iSupports);

        // Create a policy entry for the record without handing out a
        // reference (null result argument)
        rv = AssociateCacheEntryWithRecord(record, aCache, 0);
        if (NS_FAILED(rv)) return rv;
    }
    return NS_OK;
}
// Return the current time as whole seconds since the epoch, truncated
// to 32 bits.  (PR_Now() reports microseconds in a 64-bit integer.)
static PRUint32
now32()
{
    PRInt64 usec = PR_Now();
    double usecFP;
    LL_L2D(usecFP, usec);
    return (PRUint32)(usecFP * 1e-6);
}
/**
 * Record the observed download rate for this cache entry, given the
 * start and end timestamps (PR_Now() microseconds) of the transfer.
 * The new observation is folded into mDownloadRate with exponential
 * smoothing so a single atypical transfer doesn't dominate.
 */
void
nsCachedNetData::NoteDownloadTime(PRTime start, PRTime end)
{
    double startFP, endFP, rate, duration;
    LL_L2D(startFP, start);
    LL_L2D(endFP, end);
    duration = endFP - startFP;   // microseconds

    // Sanity-check: ignore zero-length (or clock-glitch) intervals
    if (!duration)
        return;

    // Compute download rate in KB/s (mDownloadRate is documented as
    // "roughly in units of KB/s" in nsCachedNetData.h).
    // Bug fix: the original divided by (duration * (1e-6 / 1024.0)),
    // which yields bytes/sec * 1024 -- a value 1024^2 times larger than
    // the KB/s the comment claimed.
    rate = (mLogicalLength / 1024.0) / (duration * 1e-6);

    // Exponentially smooth download rate
    const double alpha = 0.5;
    mDownloadRate = (float)(mDownloadRate * alpha + rate * (1.0 - alpha));
}
// Lower bound on a document's assumed freshness half-life: 1 hour
#define MIN_HALFLIFE (60 * 60)

// Default half-life when nothing better is known: 1 week
#define TYPICAL_HALFLIFE (7 * 24 * 60 * 60)

/**
 * Estimate the profit that would be lost if the given cache entry was evicted
 * from the cache. Profit is defined as the future expected download delay per
 * byte of cached content. The profit computation is made based on projected
 * frequency of access, prior download performance and a heuristic staleness
 * criteria. The technique used is a variation of that described in the
 * following paper:
 *
 *    "A Case for Delay-Conscious Caching of Web Documents"
 *    http://www.bell-labs.com/user/rvingral/www97.html
 *
 * Briefly, expected profit is:
 *
 * (projected frequency of access) * (download time per byte) * (probability freshness)
 *
 * @param aNow current time in seconds since the epoch, or 0 to have it
 *             computed here via now32()
 */
void
nsCachedNetData::ComputeProfit(PRUint32 aNow)
{
    PRUint32 K, now;

    if (aNow)
        now = aNow;
    else
        now = now32();

    // K = how many recorded access times we can use (bounded by MAX_K)
    K = PR_MIN(MAX_K, mNumAccesses);
    if (!K) {
        // Never accessed: no basis for profit, rank as least profitable
        mProfit = 0;
        return;
    }

    // Compute time, in seconds, since k'th most recent access
    double timeSinceKthAccess = now - mAccessTime[K - 1];
    if (timeSinceKthAccess <= 0.0) // Sanity check
        timeSinceKthAccess = 1.0;

    // Estimate frequency of future document access based on past
    // access frequency
    double frequencyAccess = K / timeSinceKthAccess;

    // If we don't have much historical data on access frequency
    // use a heuristic based on document size as an estimate
    if (mLogicalLength) {
        if (K == 1) {
            frequencyAccess /= pow(mLogicalLength, CACHE_CONST_B);
        } else if (K == 2) {
            frequencyAccess /= pow(mLogicalLength, CACHE_CONST_B / 2);
        }
    }

    // Estimate likelihood that data in cache is fresh, i.e.
    // that it corresponds to the document on the server
    double probabilityFreshness;
    PRInt32 halfLife, age, docTime;
    bool potentiallyStale;

    // Document age is measured from the document's own modification time
    // when known, else from the last time it was written to the cache
    docTime = GetFlag(LAST_MODIFIED_KNOWN) ? mLastModifiedTime : mLastUpdateTime;
    age = now - docTime;
    probabilityFreshness = 1.0; // Optimistic

    if (GetFlag(EXPIRATION_KNOWN)) {
        potentiallyStale = now > mExpirationTime;
        halfLife = mExpirationTime - mLastModifiedTime;
    } else if (GetFlag(STALE_TIME_KNOWN)) {
        potentiallyStale = true;
        halfLife = mStaleTime - docTime;
    } else {
        potentiallyStale = true;
        halfLife = TYPICAL_HALFLIFE;
    }

    if (potentiallyStale) {
        if (halfLife < MIN_HALFLIFE)
            halfLife = MIN_HALFLIFE;
        // Exponential decay: freshness halves once per half-life of age
        probabilityFreshness = pow(0.5, (double)age / (double)halfLife);
    }

    mProfit = (float)(frequencyAccess * probabilityFreshness * mDownloadRate);
}
// Number of entries to grow mRankedEntries array when it's full
#define STATS_GROWTH_INCREMENT 256
// Sorting predicate for NS_Quicksort
int
nsCachedNetData::Compare(const void *a, const void *b, void *unused)
{
nsCachedNetData* entryA = (nsCachedNetData*)a;
nsCachedNetData* entryB = (nsCachedNetData*)b;
// Percolate deleted or empty entries to the end of the mRankedEntries
// array, so that they can be recycled.
if (!entryA || entryA->GetFlag(RECYCLED)) {
if (!entryB || entryB->GetFlag(RECYCLED))
return 0;
else
return +1;
}
if (!entryB || entryB->GetFlag(RECYCLED))
return -1;
// Evicted entries (those with no content data) and active entries (those
// currently being updated) are collected towards the end of the sorted
// array just prior to the deleted cache entries, since evicted entries
// can't be re-evicted.
if (entryA->GetFlag(UPDATE_IN_PROGRESS)) {
if (entryB->GetFlag(UPDATE_IN_PROGRESS))
return 0;
else
return +1;
}
if (entryB->GetFlag(UPDATE_IN_PROGRESS))
return -1;
PRUint16 Ka = PR_MIN(MAX_K, entryA->mNumAccesses);
PRUint16 Kb = PR_MIN(MAX_K, entryB->mNumAccesses);
// Order cache entries by the number of times they've been accessed
if (Ka < Kb)
return -1;
if (Ka > Kb)
return +1;
/*
* Among records that have been accessed an equal number of times, order
* them by profit.
*/
if (entryA->mProfit > entryB->mProfit)
return +1;
if (entryA->mProfit < entryB->mProfit)
return -1;
return 0;
}
/**
 * Rank cache entries in terms of their eligibility for eviction.
 */
nsresult
nsReplacementPolicy::RankRecords()
{
    PRUint32 i, now;

    // Add all cache records if this is the first ranking
    if (!mLastRankTime) {
        nsresult rv;
        CacheInfo *cacheInfo;

        cacheInfo = mCaches;
        while (cacheInfo) {
            rv = AddAllRecordsInCache(cacheInfo->mCache);
            if (NS_FAILED(rv)) return rv;
            cacheInfo = cacheInfo->mNext;
        }
    }

    // Get current time and convert to seconds since the epoch
    now = now32();

    // Recompute profit for every known cache record, except deleted ones
    for (i = 0; i < mNumEntries; i++) {
        nsCachedNetData* entry = mRankedEntries[i];
        if (entry && !entry->GetFlag(nsCachedNetData::RECYCLED))
            entry->ComputeProfit(now);
    }

    NS_QuickSort(mRankedEntries, mNumEntries, sizeof *mRankedEntries,
                 nsCachedNetData::Compare, 0);

    // The sort collects removed/recycled entries at the tail of the
    // array, so they can now be dropped from the logical entry count
    mNumEntries -= mRecordsRemovedSinceLastRanking;
    mRecordsRemovedSinceLastRanking = 0;

    mLastRankTime = now;
    return NS_OK;
}
// A heuristic policy to avoid the cost of re-ranking cache records by
// profitability every single time space must be made available in the cache.
void
nsReplacementPolicy::MaybeRerankRecords()
{
// Rank at most once per minute
PRUint32 now = now32();
if ((now - mLastRankTime) >= 60)
RankRecords();
}
void
nsReplacementPolicy::CompactRankedEntriesArray()
{
if (mRecordsRemovedSinceLastRanking || !mLastRankTime)
RankRecords();
}
// If either the policy as a whole or any individual cache has reached its
// limit on the number of entries, delete one entry to make room.
nsresult
nsReplacementPolicy::CheckForTooManyCacheEntries()
{
    // Aggregate limit: the ranked-entries array has grown to its maximum,
    // so evict from whichever cache holds the least desirable entry.
    if (mCapacityRankedEntriesArray == mMaxEntries)
        return DeleteOneEntry(0);

    // Per-cache limits: evict from the first cache that is full.
    for (CacheInfo* cacheInfo = mCaches; cacheInfo; cacheInfo = cacheInfo->mNext) {
        PRUint32 numEntries, maxEntries;
        nsresult rv = cacheInfo->mCache->GetNumEntries(&numEntries);
        if (NS_FAILED(rv)) return rv;
        rv = cacheInfo->mCache->GetMaxEntries(&maxEntries);
        if (NS_FAILED(rv)) return rv;
        if (numEntries == maxEntries)
            return DeleteOneEntry(cacheInfo->mCache);
    }
    return NS_OK;
}
/**
 * Create a new association between a low-level cache database record and a
 * cache entry. Add the entry to the set of entries eligible for eviction from
 * the cache. This would typically be done when the cache entry is created.
 *
 * @param aRecord  Low-level cache database record to associate.
 * @param aCache   The cache database that owns aRecord.
 * @param aResult  Optional; when non-null, receives an AddRef'd pointer to
 *                 the new (or already-known) cache entry.
 * @return NS_OK on success, NS_ERROR_OUT_OF_MEMORY if the entry or the
 *         ranked-entries array could not be allocated, or a failure code
 *         propagated from aRecord.
 */
nsresult
nsReplacementPolicy::AssociateCacheEntryWithRecord(nsINetDataCacheRecord *aRecord,
                                                   nsINetDataCache* aCache,
                                                   nsCachedNetData** aResult)
{
    nsCachedNetData* cacheEntry;
    nsresult rv;
    // First, see if the record is already known to the replacement policy
    PRInt32 recordID;
    rv = aRecord->GetRecordID(&recordID);
    if (NS_FAILED(rv)) return rv;
    cacheEntry = FindCacheEntryByRecordID(recordID, aCache);
    if (cacheEntry) {
        if (aResult) {
            // A DORMANT entry has dropped its underlying record; reattach
            // it to aRecord before handing it out.
            if (cacheEntry->GetFlag(nsCachedNetData::DORMANT))
                cacheEntry->Resurrect(aRecord);
            NS_ADDREF(cacheEntry);
            *aResult = cacheEntry;
        }
        return NS_OK;
    }
    // Compact the array of cache entry statistics, so that free entries appear
    // at the end, for possible reuse.
    if (mNumEntries && (mNumEntries == mCapacityRankedEntriesArray))
        CompactRankedEntriesArray();
    // If compaction doesn't yield available entries in the
    // mRankedEntries array, then extend the array.
    if (mNumEntries == mCapacityRankedEntriesArray) {
        PRUint32 newCapacity;
        // Enforce per-cache and aggregate entry-count limits before growing.
        rv = CheckForTooManyCacheEntries();
        if (NS_FAILED(rv)) return rv;
        newCapacity = mCapacityRankedEntriesArray + STATS_GROWTH_INCREMENT;
        if (newCapacity > mMaxEntries)
            newCapacity = mMaxEntries;
        nsCachedNetData** newRankedEntriesArray;
        PRUint32 numBytes = sizeof(nsCachedNetData*) * newCapacity;
        newRankedEntriesArray =
            (nsCachedNetData**)nsAllocator::Realloc(mRankedEntries, numBytes);
        if (!newRankedEntriesArray)
            return NS_ERROR_OUT_OF_MEMORY;
        mRankedEntries = newRankedEntriesArray;
        mCapacityRankedEntriesArray = newCapacity;
        // Null out the newly-added, unoccupied slots.
        PRUint32 i;
        for (i = mNumEntries; i < newCapacity; i++)
            mRankedEntries[i] = 0;
    }
    // Recycle the record after the last in-use record in the array.
    // The tail slot is either empty or holds a deleted (RECYCLED) entry
    // awaiting reuse.  NOTE: the original assertion condition was inverted
    // (!entry->GetFlag(RECYCLED)); it contradicted both its own message and
    // the recycle branch below, and would fire on every legitimate reuse.
    nsCachedNetData *entry = mRankedEntries[mNumEntries];
    NS_ASSERTION(!entry || entry->GetFlag(nsCachedNetData::RECYCLED),
                 "Only deleted cache entries should appear at end of array");
    if (!entry) {
        // Arena-allocated; freed wholesale when the arena is destroyed.
        entry = new(mArena) nsCachedNetData;
        if (!entry)
            return NS_ERROR_OUT_OF_MEMORY;
        mRankedEntries[mNumEntries] = entry;
    } else {
        // Clear out recycled data structure
        nsCRT::zero(entry, sizeof(*entry));
    }
    entry->Init(aRecord, aCache);
    AddCacheEntry(entry, recordID);
    // Add one reference to the cache entry from the cache manager
    NS_ADDREF(entry);
    if (aResult) {
        // And one reference from our caller
        NS_ADDREF(entry);
        *aResult = entry;
    }
    mNumEntries++;
    return NS_OK;
}
/**
 * Look up (or lazily create) the low-level cache record for the given opaque
 * key in aCache, then wrap it in a ref-counted nsCachedNetData entry that is
 * tracked by this replacement policy.
 */
nsresult
nsReplacementPolicy::GetCachedNetData(const char* cacheKey, PRUint32 cacheKeyLength,
                                      nsINetDataCache* aCache,
                                      nsCachedNetData** aResult)
{
    nsCOMPtr<nsINetDataCacheRecord> record;
    nsresult rv = aCache->GetCachedNetData(cacheKey, cacheKeyLength,
                                           getter_AddRefs(record));
    if (NS_FAILED(rv))
        return rv;
    return AssociateCacheEntryWithRecord(record, aCache, aResult);
}
/**
 * Delete the least desirable record from the cache database. This is used
 * when the addition of another record would exceed either the cache manager or
 * the cache's maximum permitted number of records.
 *
 * @param aCache  When non-null, only entries belonging to this cache are
 *                considered; otherwise any entry may be deleted.
 * @return NS_OK (or DeleteCacheEntry's status) if an entry was deleted;
 *         NS_ERROR_FAILURE if no candidate entry could be found.
 */
nsresult
nsReplacementPolicy::DeleteOneEntry(nsINetDataCache *aCache)
{
    PRUint32 i;
    nsresult rv;
    nsCachedNetData *entry;
    i = 0;
    while (1) {
        MaybeRerankRecords();
        // Scan forward for the least-profitable live entry, optionally
        // restricted to aCache (the array is ranked least-desirable first).
        for (; i < mNumEntries; i++) {
            entry = mRankedEntries[i];
            if (!entry || entry->GetFlag(nsCachedNetData::RECYCLED))
                continue;
            if (!aCache || (entry->mCache == aCache))
                break;
        }
        // Report error if no record found to delete
        if (i == mNumEntries)
            return NS_ERROR_FAILURE;
        rv = entry->Delete();
        if (NS_SUCCEEDED(rv)) {
            rv = DeleteCacheEntry(entry);
            return rv;
        }
        // Deletion failed (e.g. the entry is in use).  Advance past this
        // entry before retrying; previously the loop re-selected the same
        // undeletable entry at index i and could spin forever, since
        // MaybeRerankRecords() is a no-op within its 60-second throttle.
        i++;
    }
}
/**
 * Report the total storage, in KB, consumed by all caches governed by this
 * replacement policy.  On failure the out-param holds a partial sum and
 * should be ignored by the caller.
 */
nsresult
nsReplacementPolicy::GetStorageInUse(PRUint32* aStorageInUse)
{
    *aStorageInUse = 0;
    for (CacheInfo* cacheInfo = mCaches; cacheInfo; cacheInfo = cacheInfo->mNext) {
        PRUint32 cacheStorage;
        nsresult rv = cacheInfo->mCache->GetStorageInUse(&cacheStorage);
        if (NS_FAILED(rv))
            return rv;
        *aStorageInUse += cacheStorage;
    }
    return NS_OK;
}
/**
 * Delete the least desirable records from the cache until the occupancy of the
 * cache has been reduced by the given number of KB. This is used when the
 * addition of more cache data would exceed the cache's capacity.
 *
 * @param aTargetOccupancy  Desired maximum occupancy, in KB.
 * @return NS_OK once occupancy drops to or below the target;
 *         NS_ERROR_FAILURE if every ranked entry was examined without
 *         reaching the target.
 */
nsresult
nsReplacementPolicy::Evict(PRUint32 aTargetOccupancy)
{
    PRUint32 i;
    nsCachedNetData *entry;
    nsresult rv;
    PRUint32 occupancy;
    PRInt32 truncatedLength;
    nsCOMPtr<nsINetDataCacheRecord> record;
    MaybeRerankRecords();
    // Walk entries from least to most profitable, evicting until enough
    // storage has been reclaimed.
    for (i = 0; i < mNumEntries; i++) {
        rv = GetStorageInUse(&occupancy);
        if (NS_FAILED(rv)) return rv;  // was !NS_SUCCEEDED(rv): same test, idiomatic form
        if (occupancy <= aTargetOccupancy)
            return NS_OK;
        entry = mRankedEntries[i];
        // Skip deleted/empty cache entries and ones that have already been evicted
        if (!entry || entry->GetFlag(nsCachedNetData::UNEVICTABLE))
            continue;
        if (entry->GetFlag(nsCachedNetData::ALLOW_PARTIAL)) {
            // The protocol handler tolerates partial data, so truncate only
            // as much content as needed instead of evicting it wholesale.
            rv = entry->GetRecord(getter_AddRefs(record));
            if (NS_FAILED(rv))
                continue;
            PRUint32 contentLength;
            rv = record->GetStoredContentLength(&contentLength);
            if (NS_FAILED(rv))
                continue;
            // Additional storage required, converted from KB to bytes to
            // match contentLength's units.
            PRUint32 storageToReclaim = (occupancy - aTargetOccupancy) << 10;
            truncatedLength = (PRInt32)(contentLength - storageToReclaim);
            if (truncatedLength < 0)
                truncatedLength = 0;
        } else {
            truncatedLength = 0;
        }
        // Best-effort: one entry failing to evict should not abort the
        // sweep, so the result of Evict() is deliberately not checked.
        rv = entry->Evict(truncatedLength);
    }
    return NS_ERROR_FAILURE;
}

View File

@@ -0,0 +1,136 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998-1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
/**
* This class manages one or more caches that share a storage resource, e.g. a
* file cache and a flat-database cache might each occupy space on the disk and
* they would share a single instance of nsReplacementPolicy. The replacement
* policy heuristically chooses which cache entries to evict when storage is
* required to accommodate incoming cache data.
*/
#ifndef _nsReplacementPolicy_h_
#define _nsReplacementPolicy_h_
#include "nscore.h"
#include "nsISupportsUtils.h"
#include "nsINetDataCache.h"
#include "nsICachedNetData.h"
#include "nsIArena.h"
#include "nsCOMPtr.h"
#include "nsHashtable.h"
class nsCachedNetData;
struct PL_HashTable;
/**
 * This private class is responsible for implementing the network data cache's
 * replacement policy, i.e. it decides which cache data should be evicted to
 * make room for new incoming data.
 */
class nsReplacementPolicy {
public:
    nsReplacementPolicy();
    ~nsReplacementPolicy();
protected:
    // Set up internal tables; aMaxCacheEntries caps mRankedEntries growth.
    nsresult Init(PRUint32 aMaxCacheEntries);
    // Add a cache to the linked list of caches governed by this policy.
    nsresult AddCache(nsINetDataCache *aCache);
    // Fetch (or lazily create) the record for cacheKey in aCache and wrap
    // it in an AddRef'd nsCachedNetData tracked by this policy.
    nsresult GetCachedNetData(const char* cacheKey, PRUint32 cacheKeyLength,
                              nsINetDataCache* aCache,
                              nsCachedNetData** aResult);
    // Sum of storage used by all governed caches, in KB.
    nsresult GetStorageInUse(PRUint32* aNumKBytes);
    friend class nsCacheManager;
private:
    // Sort mRankedEntries by eviction eligibility, least profitable first.
    nsresult RankRecords();
    // Re-rank, but at most once per minute (heuristic cost control).
    void MaybeRerankRecords();
    // Re-rank when removals have left reusable slots in mRankedEntries.
    void CompactRankedEntriesArray();
    // Delete the least desirable entry, optionally restricted to aCache.
    nsresult DeleteOneEntry(nsINetDataCache* aCache);
    // Evict entries until occupancy drops to aTargetOccupancy KB.
    nsresult Evict(PRUint32 aTargetOccupancy);
    // Hash-table lookup of an entry by its record ID within aCache.
    nsCachedNetData* FindCacheEntryByRecordID(PRInt32 aRecordID, nsINetDataCache *aCache);
    void AddCacheEntry(nsCachedNetData* aCacheEntry, PRInt32 aRecordID);
    nsresult DeleteCacheEntry(nsCachedNetData* aCacheEntry);
    PRUint32 HashRecordID(PRInt32 aRecordID);
    // Wrap a low-level record in a tracked cache entry (see .cpp for details).
    nsresult AssociateCacheEntryWithRecord(nsINetDataCacheRecord *aRecord,
                                           nsINetDataCache* aCache,
                                           nsCachedNetData** aResult);
    // Load every record of aCache into the ranked-entries array.
    nsresult AddAllRecordsInCache(nsINetDataCache *aCache);
    // Enforce per-cache and aggregate entry-count limits, deleting one
    // entry if a limit has been reached.
    nsresult CheckForTooManyCacheEntries();
    class CacheInfo;
private:
    // Growable array of pointers to individual cache entries; It is sorted by
    // profitability, such that low-numbered array indices refer to cache
    // entries that are the least profitable to retain. New cache entries are
    // added to the end of the array. Deleted cache entries are specially
    // marked and percolate to the end of the array for recycling whenever
    // mRankedEntries is sorted. Evicted cache entries (those with no
    // associated content data) are retained for the purpose of improving the
    // replacement policy efficacy, and are percolated towards the end of the
    // array, just prior to the deleted cache entries.
    //
    // The array is not in sorted order 100% of the time; For efficiency
    // reasons, sorting is only done when heuristically deemed necessary.
    nsCachedNetData** mRankedEntries;
    // Hash table buckets to map Record ID to cache entry. We use this instead
    // of a PL_HashTable to reduce storage requirements
    nsCachedNetData** mMapRecordIdToEntry;
    // Length of mMapRecordIdToEntry array
    PRUint32 mHashArrayLength;
    // Linked list of caches that share this replacement policy
    CacheInfo* mCaches;
    // Allocation area for cache entry (nsCachedNetData) instances
    nsCOMPtr<nsIArena> mArena;
    // Bookkeeping
    PRUint32 mRecordsRemovedSinceLastRanking;
    // Maximum permitted length of mRankedEntries array
    PRUint32 mMaxEntries;
    // Number of occupied slots in mRankedEntries array
    PRUint32 mNumEntries;
    // Capacity of mRankedEntries array
    PRUint32 mCapacityRankedEntriesArray;
    // Time at which cache entries were last ranked by profitability
    PRUint32 mLastRankTime;
};
#endif // _nsReplacementPolicy_h_

View File

@@ -0,0 +1,39 @@
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
#
# Standard mozilla tree boilerplate: DEPTH locates the tree root, and
# topsrcdir/srcdir/VPATH are substituted by configure.
DEPTH = ../../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = @srcdir@
include $(DEPTH)/config/autoconf.mk
# Headers exported from this directory into the global include area.
EXPORTS = \
	nsICacheManager.h \
	nsICacheObject.h \
	nsICachePref.h \
	nsICacheModule.h \
	$(NULL)
# Prefix each header with its source location (needed for objdir builds).
EXPORTS := $(addprefix $(srcdir)/, $(EXPORTS))
include $(topsrcdir)/config/rules.mk

41
mozilla/netwerk/cache/public/Makefile.win vendored Executable file
View File

@@ -0,0 +1,41 @@
#!gmake
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is Netscape
# Communications Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
MODULE = nkcache
DEPTH = ..\..\..
include <$(DEPTH)/config/config.mak>
# No plain C/C++ header exports from this directory.
EXPORTS = \
	$(NULL)
# IDL interfaces compiled by xpidl into headers and typelib entries.
XPIDLSRCS = \
	.\nsICachedNetData.idl \
	.\nsINetDataCacheManager.idl \
	.\nsINetDataCache.idl \
	.\nsINetDataCacheRecord.idl \
	.\nsINetDataDiskCache.idl \
	.\nsIStreamAsFile.idl \
	$(NULL)
include <$(DEPTH)/config/rules.mak>

View File

@@ -0,0 +1,229 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
*/
#include "nsrootidl.idl"
#include "nsISupports.idl"
interface nsIFileSpec;
interface nsIURI;
interface nsIObserver;
interface nsIChannel;
interface nsINetDataCache;
interface nsINetDataCacheRecord;
interface nsILoadGroup;
interface nsIStreamListener;
/**
 * The nsICachedNetData interface represents a single entry in a database that
 * caches data retrieved from the network. This interface is implemented by the
 * cache manager on top of the low-level nsINetDataCacheRecord and
 * nsINetDataCache interfaces that are implemented by the database.
 *
 * Each cache record may contain both content and metadata. The content may
 * be, for example, GIF image data or HTML, and it is accessed through
 * nsIChannel's streaming API. The opaque metadata, which may contain HTTP
 * headers among other things, is stored as a byte array. Each entry in the
 * cache is indexed by two different keys: a record id number and a key created
 * by combining the URI with a "secondary key", e.g. HTTP post data.
 *
 * @See nsINetDataCacheRecord
 * @See nsINetDataCache
 * @See nsINetDataDiskCache
 * @See nsINetDataCacheManager
 */
[scriptable, uuid(6aeb2a40-6d43-11d3-90c8-000064657374)]
interface nsICachedNetData : nsISupports
{
    /**
     * String form of the URI provided as an argument to the call to
     * nsINetDataCacheManager::GetCachedNetData() that created this record.
     */
    readonly attribute string uriSpec;
    /**
     * Getter for the opaque secondary database key provided as an argument to
     * the call to nsINetDataCacheManager::GetCachedNetData() that created this
     * record.
     */
    void getSecondaryKey(out unsigned long length,
                         [retval, size_is(length)] out string secondaryKey);
    /**
     * This flag may be set by a protocol handler to indicate that it supports
     * partial fetching of data. In that case, the cache manager is permitted
     * to truncate the entry's content to accommodate incoming data for other
     * cache entries rather than deleting it wholesale.
     */
    attribute boolean allowPartial;
    /**
     * This flag indicates that the write stream supplying content data for the
     * cache did not complete normally and, therefore, the content may be
     * truncated.
     */
    readonly attribute boolean partialFlag;
    /**
     * This flag can be set and cleared by a protocol handler as a form of
     * self-notification, so as to avoid race conditions in which a protocol
     * handler issues two identical network requests to fill the same cache
     * entry. The cache manager itself largely ignores this flag.
     */
    attribute boolean updateInProgress;
    /**
     * inUse is set if any existing channels are associated with this cache
     * entry or if the updateInProgress flag is set. This can be used to
     * prevent writing to a cache entry by a protocol handler if it's being
     * read or written elsewhere.
     */
    readonly attribute boolean inUse;
    /**
     * Date/time that the document was last stored on the origin server, as
     * supplied by the protocol handler. This value is used as input to the
     * cache replacement policy, i.e. it is not used for validation. If the
     * protocol can't supply a last-modified time, this attribute should remain
     * unset. When unset, the value of this attribute is zero.
     *
     * FIXME: Should use nsIDateTime interface, once it's created
     * instead of PRTime, for improved scriptability ?
     */
    attribute PRTime lastModifiedTime;
    /**
     * Supplied by the protocol handler, the expirationTime attribute specifies
     * the time until which the document is guaranteed fresh, i.e. the document
     * does not have to be validated with the server and, therefore, any data
     * in cache is definitely usable. The value of this attribute serves as a
     * hint to the cache replacement policy. Only one of either staleTime or
     * expirationTime may be set for a single cache record. When unset, the
     * value of this attribute is zero.
     */
    attribute PRTime expirationTime;
    /**
     * Date/time supplied by the protocol handler, at which point the content
     * is *likely* to be stale, i.e. the data in the cache may be out-of-date
     * with respect to the data on the server. This heuristic date does not
     * necessarily correspond to the HTTP Expires header, as it does not
     * determine when cached network data must be validated with the origin
     * server, but only serves as a hint to the cache replacement policy. Only
     * one of either staleTime or expirationTime may be set for a single cache
     * record. When unset, the value of this attribute is zero.
     */
    attribute PRTime staleTime;
    /**
     * Date/time of last access of the data in this cache record, as determined
     * by the cache manager.
     */
    readonly attribute PRTime lastAccessTime;
    /**
     * Number of times this record has been accessed since it was first stored.
     */
    readonly attribute PRUint16 numberAccesses;
    /**
     * Accessor methods for opaque meta-data which can be read and updated
     * independently of the content data.
     *
     * The aTag argument can be used to accommodate multiple clients of the
     * cache API, each of which wants to store its own private meta-data into
     * the cache. For example, there could be a "headers" tag that the HTTP
     * protocol handler uses to store http response headers and a "image size"
     * tag used to store the image dimensions of a GIF file. The aData
     * argument refers to an opaque blob of arbitrary bytes.
     *
     * IMPORTANT: If aData does not contain byte-oriented data, i.e. it's not a
     *            string, the contents of aData must be byte-swapped by the
     *            caller, so as to make the cache files endian-independent.
     */
    void getAnnotation(in string aTag,
                       out PRUint32 aLength, [size_is(aLength), retval] out string aData);
    void setAnnotation(in string aTag,
                       in PRUint32 aLength, [size_is(aLength)] in string aData);
    /**
     * As a getter, return the number of content bytes stored in the cache,
     * i.e. via the nsIChannel streaming APIs. This may be less than the
     * complete content length if a partial cache fill occurred. The cached
     * content can be truncated by setting the value of this attribute. The
     * value of the attribute represents a logical, not a physical, length. If
     * compression has been used, the content may consume less storage than
     * indicated by this attribute.
     *
     * When this attribute is set to zero the associated cache disk file, if
     * any, should be deleted.
     */
    attribute PRUint32 storedContentLength;
    /**
     * Notify any observers associated with this cache entry of the deletion
     * request. If all observers drop their reference to the cache entry,
     * proceed to delete the underlying cache database record and associated
     * content storage.
     */
    void delete();
    /**
     * Flush any changes in this entry's data to the cache database. This
     * method will automatically be called when the last reference to the cache
     * is dropped, but it can also be called explicitly for a synchronous
     * effect.
     */
    void commit();
    /**
     * Parent container cache for this entry.
     */
    readonly attribute nsINetDataCache cache;
    /**
     * Create a channel for reading or writing a stream of content into the
     * entry. It is expected that many of the nsIChannel methods return
     * NS_NOT_IMPLEMENTED, including:
     *
     *   + GetURI()
     *   + GetContentType()
     *   + GetContentLength()
     *
     * Though nsIChannel provides for both async and synchronous I/O APIs, both
     * may not be implemented. Only AsyncRead() and OpenOutputStream() are
     * required. The aProxyChannel argument allows another channel to be
     * specified as the proffered argument to nsIStreamListener methods rather
     * than the cache's own channel.
     */
    nsIChannel newChannel(in nsILoadGroup aLoadGroup,
                          in nsIChannel aProxyChannel);
    /**
     * This method can be used by a caching protocol handler to store data in
     * the cache by forking an asynchronous read stream so that it is
     * simultaneously sent to a requester and written into the cache. This
     * method implicitly sets the updateInProgress flag, if it has not already
     * been set.
     */
    nsIStreamListener interceptAsyncRead(in nsIStreamListener aOriginalListener,
                                         in PRUint32 aStartOffset);
};

View File

@@ -0,0 +1,143 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
*/
#include "nsISupports.idl"
interface nsIURI;
interface nsINetDataCacheRecord;
interface nsISimpleEnumerator;
interface nsIFileSpec;
/**
 * The nsINetDataCache defines the low-level API for a network-data
 * cache, used to cache the responses to network retrieval commands.
 * This interface, along with nsINetDataCacheRecord, is implemented by
 * the memory cache, the file cache and, optionally, by some extension
 * caches. This interface is essentially a pseudo-private API for the
 * cache manager. Other clients should never use this interface.
 *
 * Each cache entry may contain both content, e.g. GIF image data, and
 * associated metadata, e.g. HTTP headers. Each entry is indexed by two
 * different keys: a record id number and a key created by combining the URI
 * with a "secondary key", e.g. HTTP post data.
 *
 * The nsINetDataCache interface is agnostic as to where the data is
 * stored and whether the storage is volatile or persistent. The
 * memory cache, any disk caches and any extension caches must all
 * implement this interface.
 *
 */
[scriptable, uuid(ccfc58c0-6dde-11d3-90c8-000064657374)]
interface nsINetDataCache : nsISupports
{
    /**
     * Human-readable description of the cache module, e.g. "Disk Cache"
     */
    readonly attribute wstring description;
    /**
     * Returns true if cached data is available for the given opaque key,
     * even if only partial data is stored.
     */
    boolean contains([size_is(length)] in string key, in PRUint32 length);
    /**
     * Fetch the cache entry record for the given opaque key. If one does not
     * exist, create a new, empty record.
     */
    nsINetDataCacheRecord getCachedNetData([size_is(length)] in string key,
                                           in PRUint32 length);
    /**
     * Fetch an existing cache entry record, using its unique record ID
     * (rather than its opaque string key) as the lookup key.
     */
    nsINetDataCacheRecord getCachedNetDataByID(in PRInt32 RecordID);
    /**
     * False indicates that this cache is entirely bypassed.
     */
    attribute boolean enabled;
    /**
     * Constants for flags attribute, below
     */
    // Used for extension caches, e.g. a CD-ROM cache
    const long READ_ONLY           = 1 << 0;
    // One of these bits must be set
    const long MEMORY_CACHE        = 1 << 1;
    const long FLAT_FILE_CACHE     = 1 << 2;
    const long FILE_PER_URL_CACHE  = 1 << 3;
    /**
     * See constants defined above.
     */
    readonly attribute PRUint32 flags;
    /**
     * Total number of URI entries stored in the cache.
     */
    readonly attribute PRUint32 numEntries;
    /**
     * Maximum number of URI entries that may be stored in the cache.
     */
    readonly attribute PRUint32 maxEntries;
    /**
     * Enumerate the URI entries stored in the cache.
     */
    nsISimpleEnumerator newCacheEntryIterator();
    /**
     * Contains a reference to the next cache in search order. For the memory
     * cache, this attribute always references the disk cache. For the disk
     * cache, it contains a reference to the first extension cache.
     */
    attribute nsINetDataCache nextCache;
    /**
     * An estimate of the amount of storage occupied by the cache, in kB.
     * Actual use may be slightly higher than reported due to cache overhead
     * and heap fragmentation (in the memory cache) or block quantization (in
     * the disk cache).
     */
    readonly attribute PRUint32 storageInUse;
    /**
     * Remove all entries from a writable cache. This could be used, for
     * example, after a guest ends a browser session. This is equivalent to
     * setting the cache's Capacity to zero, except that all cache entries,
     * even those in active use, will be deleted. Also, any global cache
     * database files will be deleted.
     */
    void removeAll();
};
%{ C++
// ProgID prefix for Components that implement this interface
#define NS_NETWORK_CACHE_PROGID         "component://netscape/network/cache"
#define NS_NETWORK_MEMORY_CACHE_PROGID  NS_NETWORK_CACHE_PROGID "?name=memory-cache"
#define NS_NETWORK_FLAT_CACHE_PROGID    NS_NETWORK_CACHE_PROGID "?name=flat-cache"
#define NS_NETWORK_FILE_CACHE_PROGID    NS_NETWORK_CACHE_PROGID "?name=file-cache"
%}

View File

@@ -0,0 +1,163 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
*/
#include "nsISupports.idl"
interface nsISimpleEnumerator;
interface nsICachedNetData;
interface nsINetDataCache;
interface nsINetDataDiskCache;
interface nsIURI;
interface nsIFileSpec;
/**
 * The network-response cache manager is partly responsible for the caching of
 * content and associated metadata that has been retrieved via the network.
 * (The remaining responsibility for caching lies with individual network
 * protocol handlers.)
 *
 * The cache manager supervises the actions of individual cache components,
 * such as the memory cache, the disk cache and any extension caches, e.g. a
 * read-only CD-ROM cache.
 *
 * @See nsINetDataCache
 * @See nsICachedNetData
 */
[scriptable, uuid(71c8ab00-6d5c-11d3-90c8-000064657374)]
interface nsINetDataCacheManager : nsISupports
{
    /**
     * Flag for the GetCachedNetData() method: If set, the memory cache is
     * neither searched nor will any data be stored into it. This might be
     * appropriate, for example, with images, because they have their own
     * cache for storing *decoded* images.
     */
    const unsigned long BYPASS_MEMORY_CACHE     = 1 << 0;
    /**
     * Flag for the GetCachedNetData() method: If set, the disk cache
     * is neither searched nor will any data be stored into it.
     * However, read-only extension caches may be searched. This
     * might be used to avoid leaving persistent records of secure
     * data.
     */
    const unsigned long BYPASS_PERSISTENT_CACHE = 1 << 1;
    /**
     * Flag for the GetCachedNetData() method: If set, any stream
     * content is stored in the cache as a single disk file. Content
     * will not be cached in the memory cache nor is it cached in a
     * flat-file cache database. This is used to implement the jar
     * protocol handler and to provide the stream-as-file semantics
     * required by the classic browser plugin API.
     */
    const unsigned long CACHE_AS_FILE           = 1 << 2;
    /**
     * Fetch the cache entry record for the given URI. If one does not exist,
     * create a new, empty record. The normal search order for caches is:
     *   + Memory cache
     *   + Disk cache
     *   + File cache (stream-as-file cache)
     *   + All extension caches
     *
     * When writing, data is typically stored in both the memory cache and the
     * disk cache. Both the search order and this write policy can be modified by
     * setting one or more of the flag argument bits, as defined above.
     *
     * The optionally-NULL secondaryKey argument can be used, e.g. for form
     * post data or for HTTP headers in the case of HTTP.
     */
    nsICachedNetData getCachedNetData(in string uri,
                                      [size_is(secondaryKeyLength)] in string secondaryKey,
                                      in PRUint32 secondaryKeyLength,
                                      in PRUint32 flags);
    /**
     * Returns true if cached content is available for the given URI, even if
     * only partial data is stored. The flags argument behaves the same as for
     * the GetCachedNetData() method, above.
     */
    boolean contains(in string uri,
                     [size_is(secondaryKeyLength)] in string secondaryKey,
                     in PRUint32 secondaryKeyLength,
                     in PRUint32 flags);
    /**
     * Total number of unexpired URI entries stored in all caches. This number
     * does not take into account duplicate URIs, e.g. because the memory cache
     * and the disk cache might each contain an entry for the same URI.
     */
    readonly attribute PRUint32 numEntries;
    /**
     * Enumerate the unexpired URI entries stored in all caches. Some URIs may
     * be enumerated more than once, e.g. because the memory cache and the
     * disk cache might each contain an entry for the same URI.
     */
    nsISimpleEnumerator newCacheEntryIterator();
    /*
     * Enumerate all the loaded nsINetDataCache-implementing cache modules.
     * The first module enumerated will be the memory cache, the second will be
     * the disk cache, then the file cache, followed by all the extension
     * caches, in search order.
     */
    nsISimpleEnumerator newCacheModuleIterator();
    /**
     * Remove all entries from all writable caches. This could be used, for
     * example, after a guest ends a browser session. This is equivalent to
     * setting the DiskCacheCapacity to zero, except that all cache entries,
     * even those in active use, will be deleted. Also, any global cache
     * database files will be deleted.
     */
    void RemoveAll();
    /**
     * The disk cache is made up of the file cache (for stream-as-file
     * requests) and a (possibly independent) persistent cache that handles all
     * other cache requests. This attribute sets/gets the combined capacity of
     * these caches, measured in KBytes. Setting the capacity lower than the
     * current amount of space currently in use may cause cache entries to be
     * evicted from the cache to accommodate the requested capacity.
     */
    attribute PRUint32 diskCacheCapacity;
    /**
     * This attribute sets/gets the capacity of the memory cache, measured in
     * KBytes. Setting the capacity lower than the current amount of space
     * currently in use may cause cache entries to be evicted from the cache to
     * accommodate the requested capacity.
     */
    attribute PRUint32 memCacheCapacity;
    /**
     * This attribute must be set before attempting to store into the disk cache.
     */
    attribute nsIFileSpec diskCacheFolder;
};
%{ C++
// ProgID prefix for Components that implement this interface
#define NS_NETWORK_CACHE_MANAGER_PROGID NS_NETWORK_CACHE_PROGID "?name=manager"
%}

View File

@@ -0,0 +1,125 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
*/
#include "nsISupports.idl"
#include "nsrootidl.idl"
interface nsIFileSpec;
interface nsIChannel;
interface nsINetDataCache;
/**
* The nsINetDataCacheRecord represents a single entry in a database that
* caches data retrieved from the network. On top of this low-level interface
* to the raw record data, the cache manager implements a higher-level record
* interface, nsICachedNetData. Each instance of nsINetDataCacheRecord is
* (internally) associated with a parent database, an instance of the
* nsINetDataCache interface. This interface is essentially a pseudo-private
* API for the cache manager. Other clients should never use this interface.
*
* Each cache record may contain both content and metadata. The content may
* be, for example, GIF image data or HTML, and it is accessed through
* nsIChannel's streaming API. The opaque metadata, which may contain HTTP
* headers among other things, is accessed as a contiguous byte array. Each
* entry in the cache is indexed by two different keys: a unique record id
* number, generated by the cache, and an opaque string. The latter contains
* the URI and other secondary key information, e.g. HTTP form post key/value
* pairs.
*
* The nsINetDataCacheRecord interface is agnostic as to where the data is
* stored and whether the storage is volatile or persistent. The memory cache,
* the disk cache, a flat-file cache and any read-only extension caches must
* all implement this interface.
*
* @See nsICachedNetData
* @See nsINetDataCache
* @See nsINetDataDiskCache
* @See nsINetDataCacheManager
*/
interface nsILoadGroup;
[scriptable, uuid(fdcdd6a0-7461-11d3-90ca-0040056a906e)]
interface nsINetDataCacheRecord : nsISupports
{
/**
* As far as the nsINetDataCacheRecord implementation is concerned, the
* cache entry database key is an opaque blob, but it's intended to contain
* both the URI and any secondary keys, such as HTTP post data.
* The key is returned as a counted byte array, not a NUL-terminated string.
*/
void getKey(out unsigned long length, [size_is(length), retval] out string key);
/**
* A persistent record number assigned by the cache which must be unique
* among all entries stored within the same cache. The record ID serves as
* an alternate key to the cache record. Providing that they satisfy the
* aforementioned uniqueness requirement, record IDs can be assigned any
* value by the database except that they may never be zero.
*/
readonly attribute PRInt32 recordID;
/**
* Opaque data which can be updated for each cache entry independently of
* the content data. This data is a combination of protocol-independent
* data provided by the cache manager and protocol-specific meta-data,
* e.g. HTTP headers. The getter returns a counted byte array; the setter
* replaces the stored metadata wholesale.
*/
void getMetaData(out PRUint32 length, [size_is(length), retval] out string metaData);
void setMetaData(in PRUint32 length, [size_is(length)] in string data);
/**
* Number of content bytes stored in the cache, i.e. via the nsIChannel
* streaming APIs. This may be less than the complete content length if a
* partial cache fill occurred. Additionally, the cached content can be
* truncated by reducing the value of this attribute. When this attribute
* is set to zero the associated cache disk file, if any, should be
* deleted.
*/
attribute PRUint32 storedContentLength;
/**
* Delete this cache entry and its associated content.
*/
void delete();
/**
* Create a channel for reading or writing a stream of content into the
* entry. However, many of the nsIChannel methods may return
* NS_NOT_IMPLEMENTED, including:
*
* + GetURI()
* + GetContentType()
* + GetContentLength()
*/
nsIChannel newChannel(in nsILoadGroup loadGroup);
/**
* If a cache is implemented such that it stores each URI's content in an
* individual disk file, this method will identify the file corresponding
* to this record. This may be used to implement the "stream-as-file"
* semantics required by some plugins and by the 'jar:' protocol handler.
* However, not all cache implementations are *required* to store the data
* from each URI in an individual file, so it is acceptable for an
* implementation of this method to signal NS_NOT_IMPLEMENTED.
*/
readonly attribute nsIFileSpec filename;
};

View File

@@ -0,0 +1,42 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
*/
#include "nsINetDataCache.idl"
interface nsIFileSpec;
/**
* A network-data disk cache is used to persistently cache the responses to
* network retrieval commands. Each cache entry may contain both content,
* e.g. GIF image data, and associated metadata, e.g. HTTP headers.
*/
[scriptable, uuid(6408e390-6f13-11d3-90c8-000064657374)]
interface nsINetDataDiskCache : nsINetDataCache
{
/**
* Folder in which the disk cache stores its files. This attribute must be
* set before calling any other methods of this interface.
*/
attribute nsIFileSpec diskCacheFolder;
};

View File

@@ -0,0 +1,106 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* The contents of this file are subject to the Netscape Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1999 Netscape Communications Corporation. All
* Rights Reserved.
*
* Contributor(s):
* Scott Furman, fur@netscape.com
*/
#include "nsrootidl.idl"
#include "nsISupports.idl"
interface nsIFileSpec;
interface nsIStreamAsFileObserver;
/**
* In addition to enhancing effective network response time via caching, the
* cache manager serves a second purpose by providing the stream-as-file
* service required by traditional browser plugins and the jar: protocol
* handler. The interface below provides a means for a client to determine the
* filename associated with a stream and to detect modification/deletion of
* that file.
*/
[scriptable, uuid(0eedbbf0-92d9-11d3-90d3-0040056a906e)]
interface nsIStreamAsFile : nsISupports
{
/**
* File containing the stream's data, once completely downloaded.
*/
readonly attribute nsIFileSpec fileSpec;
/**
* Add an observer for this cache record. When the cache wants to delete
* or truncate a record, so as to make space for another cache entry's
* content data, it will call <code>aObserver</code>'s Observe() method,
* passing the nsIStreamAsFile instance as the <code>aSubject</code>
* argument and an appropriate message. If the observer does not wish to
* inhibit deletion/truncation, it should Release() any references it has to the
* cache record.
*
* @See nsIStreamAsFileObserver
*/
void addObserver(in nsIStreamAsFileObserver aObserver);
/**
* Remove an observer that was added by the AddObserver() method.
*/
void removeObserver(in nsIStreamAsFileObserver aObserver);
};
/**
* This interface can be implemented by a client to receive notifications of
* either modification or deletion of a file created by the cache manager using
* the stream-as-file semantics.
*/
[scriptable, uuid(a26e27c0-92da-11d3-90d3-0040056a906e)]
interface nsIStreamAsFileObserver : nsISupports
{
/**
* Flag bits for argument to Observe() method.
*/
const long NOTIFY_AVAILABLE = 1 << 0; // Stream-as-file now available for reading
const long NOTIFY_ERROR = 1 << 1; // Error while loading stream / creating file
const long REQUEST_DELETION = 1 << 2; // Cache manager wishes to delete/truncate file
const long INVALIDATE = 1 << 3; // File is out-of-date
// Convenience value
const long MAKE_UNAVAILABLE = REQUEST_DELETION | INVALIDATE;
/**
* Receive either a notification or a request concerning a file that has
* been opened using stream-as-file. The aMessage and aError arguments
* have varying values depending on the nature of the notification.
* aMessage is set to NOTIFY_AVAILABLE when a complete stream has been read
* and stored on disk in a file. At that point, and no sooner, may the
* filename attribute of the associated nsIStreamAsFile be accessed via the
* associated nsIStreamAsFile interface. If the aMessage argument is
* NOTIFY_ERROR, the aError argument contains the relevant error code. If
* the aMessage argument is REQUEST_DELETION (which also covers requests to
* truncate the file), the callee should immediately Release() all
* references to the nsIStreamAsFile (and any references to its associated
* nsICachedNetData instances), unless it wishes to inhibit the requested
* file modification.
* If the aMessage argument is INVALIDATE, the cache manager is replacing
* the file with a more recent version. If a client wants to continue
* using the (now out-of-date) file, it must delete it when it has finished,
* as the cache manager will effectively have relinquished ownership of the
* file.
*/
void ObserveStreamAsFile(in nsIStreamAsFile aStreamAsFile,
in PRUint32 aMessage,
in nsresult aError);
};

View File

@@ -1,479 +0,0 @@
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Mozilla Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bugzilla Bug Tracking System.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s): Terry Weissman <terry@mozilla.org>
# Contains some global routines used throughout the CGI scripts of Bugzilla.
use strict;
use CGI::Carp qw(fatalsToBrowser);
require 'globals.pl';
##
## Utility routines to convert strings
##
# Get rid of all the %xx encoding and the like from the given URL.
# Undo URL encoding: '+' becomes a space, and each %xx escape becomes the
# byte it denotes.  Returns the decoded string.
sub url_decode {
    my ($encoded) = @_;
    $encoded =~ tr/+/ /;                                # '+' encodes a space
    $encoded =~ s/%([0-9a-fA-F]{2})/chr(hex($1))/ge;    # %xx -> raw byte
    return $encoded;
}
# Quotify a string, suitable for putting into a URL.
# Percent-encode a string for use in a URL.  Letters, digits, and the
# characters _ - / . pass through unchanged; everything else becomes %xx.
sub url_quote {
    my ($raw) = @_;
    $raw =~ s{([^a-zA-Z0-9_\-/.])}{uc sprintf("%%%02x", ord($1))}eg;
    return $raw;
}
# Quotify a string, suitable for output as form values
# Escape a string for safe inclusion in an HTML form VALUE attribute.
# undef is treated as the empty string.  The '&' substitution must run
# first so later entity replacements are not double-escaped.
sub value_quote {
    my ($text) = @_;
    return "" if !defined($text);
    for ($text) {
        s/\&/\&amp;/g;
        s/</\&lt;/g;
        s/>/\&gt;/g;
        s/\"/\&quot;/g;
        s/\n/\&#010;/g;
        s/\r/\&#013;/g;
    }
    return $text;
}
# Alternate URL encoder used by Bonsai: escapes a fixed set of characters
# and strips carriage returns.  The '%' substitution must run first so the
# escapes produced by later substitutions are not re-escaped.
sub url_encode2 {
    my ($str) = @_;
    my @table = (
        [ qr/\%/  => '%25' ],
        [ qr/\=/  => '%3d' ],
        [ qr/\?/  => '%3f' ],
        [ qr/ /   => '%20' ],
        [ qr/\n/  => '%0a' ],
        [ qr/\r/  => ''    ],    # CRs are dropped entirely
        [ qr/\"/  => '%22' ],
        [ qr/\'/  => '%27' ],
        [ qr/\|/  => '%7c' ],
        [ qr/\&/  => '%26' ],
        [ qr/\+/  => '%2b' ],
    );
    foreach my $entry (@table) {
        my ($pattern, $replacement) = @$entry;
        $str =~ s/$pattern/$replacement/g;
    }
    return $str;
}
##
## Routines to generate html as part of Bonsai
##
# Create the URL that has the correct tree and batch information
# Build the query-string fragment carrying the current tree and batch ids.
# $initstr (e.g. '?' or '&') is prepended only when there is something to
# append, i.e. a non-default tree or a read-only (batch) view.
sub BatchIdPart {
    my ($initstr) = @_;
    my $readonly = Param('readonly');
    $initstr = "" unless defined($initstr) && $initstr;
    my $nondefault_tree = ($::TreeID ne "default");
    my $part = "";
    $part = $initstr if $nondefault_tree || $readonly;
    $part .= "&treeid=$::TreeID"   if $nondefault_tree;
    $part .= "&batchid=$::BatchID" if $readonly;
    return $part;
}
# Create a generic page header for bonsai pages
# Emit the standard Bonsai HTML page header to STDOUT.
#
# $title - contents of the <TITLE> element (HTML-escaped here)
# $h1    - large page heading; defaults to $title when omitted
# $h2    - secondary heading; defaults to "" when omitted
#
# NOTE(review): $h1 and $h2 are interpolated into the page unescaped, so
# callers are expected to pass trusted or pre-escaped markup.  Relies on
# html_quote(), Param(), and PerformSubsts() from globals.pl, and on the
# optional $::Setup_String global.
sub PutsHeader {
my ($title, $h1, $h2) = (@_);
if (!defined $h1) {
$h1 = $title;
}
if (!defined $h2) {
$h2 = "";
}
print "<HTML><HEAD>\n<TITLE>" . &html_quote($title) . "</TITLE>\n";
print $::Setup_String if (defined($::Setup_String) && $::Setup_String);
print Param("headerhtml") . "\n</HEAD>\n";
print "<BODY BGCOLOR=\"#FFFFFF\" TEXT=\"#000000\"\n";
print "LINK=\"#0000EE\" VLINK=\"#551A8B\" ALINK=\"#FF0000\">\n";
# Site-configurable banner, with parameter substitutions applied.
print PerformSubsts(Param("bannerhtml"), undef);
print "<TABLE BORDER=0 CELLPADDING=12 CELLSPACING=0 WIDTH=\"100%\">\n";
print " <TR>\n";
print " <TD>\n";
print " <TABLE BORDER=0 CELLPADDING=0 CELLSPACING=2>\n";
print " <TR><TD VALIGN=TOP ALIGN=CENTER NOWRAP>\n";
print " <FONT SIZE=\"+3\"><B><NOBR>$h1</NOBR></B></FONT>\n";
print " </TD></TR><TR><TD VALIGN=TOP ALIGN=CENTER>\n";
print " <B>$h2</B>\n";
print " </TD></TR>\n";
print " </TABLE>\n";
print " </TD>\n";
print " <TD>\n";
print Param("blurbhtml");
print "</TD></TR></TABLE>\n";
}
# Create a generic page trailer for bonsai pages
# Emit the standard Bonsai page footer to STDOUT: a link back to the top
# level (preserving the current tree/batch via BatchIdPart) and a
# bug-report link.  Note the footer closes </html> without an explicit
# closing </body> tag -- preserved as-is for output compatibility.
sub PutsTrailer {
my $args = BatchIdPart('?');
print "
<br clear=all>
<hr>
<a href=\"toplevel.cgi$args\" target=_top>Back to the top of Bonsai</a><br>
Found a bug or have a feature request?
<a href=\"http://bugzilla.mozilla.org/enter_bug.cgi?product=Webtools&component=Bonsai\">File a bug report</a> about it.
</html>
";
}
# Return the HTML for a single-person text input field.
# $field     - form field name
# $required  - accepted for interface compatibility but currently unused
# $def_value - initial VALUE attribute contents
# $js        - optional JavaScript wrapped into an onChange handler
sub GeneratePersonInput {
    my ($field, $required, $def_value, $js) = @_;
    $js = "" unless defined $js;
    $js = "onChange=\" $js \"" if $js ne "";
    return "<INPUT NAME=\"$field\" SIZE=32 $js VALUE=\"$def_value\">";
}
# Return the HTML for a wider text input meant to hold a list of people.
sub GeneratePeopleInput {
    my ($field, $def_value) = @_;
    my $html = "<INPUT NAME=\"$field\" SIZE=45 VALUE=\"$def_value\">";
    return $html;
}
# Build a string of <OPTION> tags from an arrayref of items.
# Consecutive duplicate items are collapsed; the literal item "-blank-"
# produces an empty-valued option.  $default selects the matching option
# (regex match when $isregexp is true, string equality otherwise); when no
# item matches and $default is non-empty, a synthetic selected option for
# $default is appended at the end.
sub make_options {
    my ($src, $default, $isregexp) = @_;
    my $prev    = "";
    my $html    = "";
    my $matched = 0;
    foreach my $item (@{ $src || [] }) {
        next unless $item eq "-blank-" || $item ne $prev;
        $item = "" if $item eq "-blank-";
        $prev = $item;
        my $is_selected = $isregexp ? ($item =~ $default) : ($default eq $item);
        if ($is_selected) {
            $html .= "<OPTION SELECTED VALUE=\"$item\">$item";
            $matched = 1;
        }
        else {
            $html .= "<OPTION VALUE=\"$item\">$item";
        }
    }
    $html .= "<OPTION SELECTED>$default" if !$matched && $default ne "";
    return $html;
}
# Build a complete <SELECT> element around make_options().
# $listtype: 0 = dropdown, 1 = 5-row list, 2 = 5-row multi-select list.
# In multi-select mode a non-empty $default is treated as a regex by
# make_options().  $onchange, if given, is attached unquoted as written.
sub make_popup {
    my ($name, $src, $default, $listtype, $onchange) = @_;
    my $html = "<SELECT NAME=$name";
    if ($listtype > 0) {
        $html .= " SIZE=5";
        $html .= " MULTIPLE" if $listtype == 2;
    }
    $html .= " onchange=$onchange" if defined($onchange) && $onchange ne "";
    my $regexp_mode = ($listtype == 2 && $default ne "");
    $html .= ">" . make_options($src, $default, $regexp_mode);
    $html .= "</SELECT>";
    return $html;
}
# Emit the standard Bonsai navigation menu as an HTML table to STDOUT.
#
# $extra - optional extra attributes appended inside the <table> tag.
#
# Tree definitions come from LoadTreeConfig() (which populates @::TreeList
# and %::TreeInfo).  If the site-local file data/cvsmenuextra exists, its
# contents are appended verbatim; a missing file is silently ignored.
sub cvsmenu {
    my ($extra) = @_;
    my ($desc, $branch, $root, $module);
    LoadTreeConfig();
    if (!defined $extra) {
        $extra = "";
    }
    print "<table border=1 bgcolor=#ffffcc $extra>\n";
    print "<tr><th>Menu</tr><tr><td><p>\n<dl>\n";
    foreach my $pass ("cvsqueryform|Query",
                      "rview|Browse",
                      "moduleanalyse|Examine Modules") {
        my ($page, $title) = split(/\|/, $pass);
        $page .= ".cgi";
        print "<b>$title</b><br><ul>\n";
        foreach my $i (@::TreeList) {
            $branch = '';
            # HACK ALERT (quick fix by adam): when browsing with rview, the
            # branch must be passed in the 'rev' param, not 'branch'.
            my $hack = ($page eq 'rview.cgi') ? 'rev' : 'branch';
            $branch = "&$hack=$::TreeInfo{$i}{'branch'}"
                if $::TreeInfo{$i}{'branch'};
            $desc = $::TreeInfo{$i}{'shortdesc'};
            $desc = $::TreeInfo{$i}{'description'} unless $desc;
            $root = "cvsroot=$::TreeInfo{$i}{'repository'}";
            $module = "module=$::TreeInfo{$i}{'module'}";
            print "<li><a href=\"$page?$root&$module$branch\">$desc</a>\n";
        }
        print "</ul>\n";
    }
    # Was: open(EXTRA, "<data/cvsmenuextra") -- a bareword handle with the
    # 2-arg form; replaced with a lexical handle and 3-arg open.
    if (open(my $extra_fh, '<', "data/cvsmenuextra")) {
        while (my $line = <$extra_fh>) {
            print $line;
        }
        close($extra_fh);
    }
    print "</dl>
<p></tr><tr><td>
Found a bug or have a feature request?
<a href=\"http://bugzilla.mozilla.org/enter_bug.cgi?product=Webtools&component=Bonsai\">File a bug report</a> about it.</td>
</tr></table>
";
}
##
## Routines to handle initializing CGI form data, cookies, etc...
##
# Parse an application/x-www-form-urlencoded query string into the global
# %::FORM and %::MFORM tables, replacing any previous contents.
# %::FORM maps each name to its value (repeated names are concatenated,
# matching historical behavior); %::MFORM maps each name to an arrayref of
# every non-empty value seen.  Names that only ever appeared with an empty
# value end up with "" in %::FORM and [] in %::MFORM.
sub ProcessFormFields {
    my ($query) = @_;
    undef %::FORM;
    undef %::MFORM;
    my %empty_fields;
    foreach my $pair (split(/&/, $query)) {
        my ($name, $value);
        if ($pair =~ /^([^=]*)=(.*)$/) {
            $name  = $1;
            $value = url_decode($2);
        }
        else {
            # Bare token with no '=' -- treat as a name with empty value.
            $name  = $pair;
            $value = "";
        }
        if ($value eq "") {
            $empty_fields{$name} = 1;
            next;
        }
        if (defined $::FORM{$name}) {
            # Repeated field: append to the scalar, accumulate in the list.
            $::FORM{$name} .= $value;
            push @{ $::MFORM{$name} }, $value;
        }
        else {
            $::FORM{$name}  = $value;
            $::MFORM{$name} = [$value];
        }
    }
    foreach my $name (keys %empty_fields) {
        next if defined $::FORM{$name};
        $::FORM{$name}  = "";
        $::MFORM{$name} = [];
    }
}
# Parse a multipart/form-data request body from STDIN into the global
# %::FORM table (and %::FILENAME for file-upload parts).
#
# $boundary - the MIME boundary from the Content-Type header; leading
#             dashes are stripped so the prefix-match below works for both
#             the opening and separating boundary lines.
#
# Reads at most $ENV{CONTENT_LENGTH} bytes, switching between header mode
# and body mode each time a boundary line is seen.  Part bodies are
# accumulated line-by-line; trailing CR/LF is stripped at the end.
sub ProcessMultipartFormFields {
my ($boundary) = (@_);
$boundary =~ s/^-*//;
my $remaining = $ENV{"CONTENT_LENGTH"};
my $inheader = 1;
my $itemname = "";
while ($remaining > 0 && ($_ = <STDIN>)) {
$remaining -= length($_);
# A line starting with the boundary begins the next part's headers.
if ($_ =~ m/^-*$boundary/) {
$inheader = 1;
$itemname = "";
next;
}
if ($inheader) {
# Blank line ends the part headers; start collecting the body.
if (m/^\s*$/) {
$inheader = 0;
$::FORM{$itemname} = "";
}
if (m/^Content-Disposition:\s*form-data\s*;\s*name\s*=\s*"([^\"]+)"/i) {
$itemname = $1;
# File-upload parts also carry a client-side filename.
if (m/;\s*filename\s*=\s*"([^\"]+)"/i) {
$::FILENAME{$itemname} = $1;
}
}
next;
}
$::FORM{$itemname} .= $_;
}
# Drop the entry created before any Content-Disposition was seen.
delete $::FORM{""};
# Get rid of trailing newlines.
foreach my $i (keys %::FORM) {
chomp($::FORM{$i});
$::FORM{$i} =~ s/\r$//;
}
}
# Return the value of a required form field from %::FORM.  If the field
# was never submitted, print an inline HTML error and terminate the CGI.
sub FormData {
    my ($field) = @_;
    return $::FORM{$field} if defined($::FORM{$field});
    print "\n<b>Error: Form field `<tt>$field</tt>' is not defined</b>\n";
    exit 0;
}
# Minimal sanity check of an e-mail address: exactly one '@', at least one
# '.' after the '@', and no commas or spaces anywhere.  On failure an HTML
# error page is printed and the script exits; on success it simply returns.
sub CheckEmailSyntax {
    my ($addr) = (@_);
    if ($addr !~ /^[^@, ]*@[^@, ]*\.[^@, ]*$/) {
        print "Content-type: text/html\n\n";
        print "<H1>Invalid e-mail address entered.</H1>\n";
        print "The e-mail address you entered\n";
        print "(<b>$addr</b>) didn't match our minimal\n";
        print "syntax checking for a legal email address. A legal\n";
        print "address must contain exactly one '\@', and at least one\n";
        # Fixed user-visible typo: was "any commas or.\n" / "spaces.\n",
        # which rendered as "commas or. spaces."
        print "'.' after the \@, and may not contain any commas or\n";
        print "spaces.\n";
        print "<p>Please click <b>back</b> and try again.\n";
        exit;
    }
}
############# Live code below here (that is, not subroutine defs) #############

# Unbuffer STDOUT so partial pages reach the browser promptly.
$| = 1;

# Uncommenting this next line can help debugging.
# print "Content-type: text/html\n\nHello mom\n";
# foreach my $k (sort(keys %ENV)) {
#     print "$k $ENV{$k}<br>\n";
# }

# Populate %::FORM / %::MFORM from the CGI request, whatever its method.
if (defined $ENV{"REQUEST_METHOD"}) {
    if ($ENV{"REQUEST_METHOD"} eq "GET") {
        if (defined $ENV{"QUERY_STRING"}) {
            $::buffer = $ENV{"QUERY_STRING"};
        } else {
            $::buffer = "";
        }
        ProcessFormFields $::buffer;
    } else {
        if ($ENV{"CONTENT_TYPE"} =~
            m@multipart/form-data; boundary=\s*([^; ]+)@) {
            ProcessMultipartFormFields($1);
            $::buffer = "";
        } else {
            # BUG FIX: the original "read STDIN, $::buffer, $len || die"
            # bound || to CONTENT_LENGTH (a named-unary-operator precedence
            # trap), so read() failures were silently ignored.  The
            # parenthesized call makes the die apply to read()'s result.
            read(STDIN, $::buffer, $ENV{"CONTENT_LENGTH"})
                or die "Couldn't get form data";
            ProcessFormFields $::buffer;
        }
    }
}

# Split the Cookie header into %::COOKIE; bare tokens get empty values.
if (defined $ENV{"HTTP_COOKIE"}) {
    foreach my $pair (split(/;/, $ENV{"HTTP_COOKIE"})) {
        $pair = trim($pair);
        if ($pair =~ /^([^=]*)=(.*)$/) {
            $::COOKIE{$1} = $2;
        } else {
            $::COOKIE{$pair} = "";
        }
    }
}

# An explicit treeid in the request overrides the default tree.
if (defined $::FORM{'treeid'} && $::FORM{'treeid'} ne "") {
    $::TreeID = $::FORM{'treeid'};
}

# Viewing a historical batch forces read-only mode.
if (defined $::FORM{'batchid'}) {
    my $bid = ExpectDigit("batchid", $::FORM{'batchid'});
    LoadBatchID();
    if ($::BatchID != $bid) {
        $::BatchID = $bid;
        # load parameters first to prevent overwriting
        Param('readonly');
        $::param{'readonly'} = 1;
    }
}

# Layers are supported only by Netscape 4.
# The DOM standards are supported by Mozilla and IE 5 or above.  It should
# also be supported by any browser claiming "Mozilla/5" or above.
$::use_layers = 0;
$::use_dom = 0;
# MSIE chokes on |type="application/x-javascript"| so if we detect MSIE, we
# should send |type="text/javascript"|.  While we're at it, we should send
# |language="JavaScript"| for any browser that is "Mozilla/4" or older.
$::script_type = 'language="JavaScript"';
if (defined $ENV{HTTP_USER_AGENT}) {
    my $user_agent = $ENV{HTTP_USER_AGENT};
    if ($user_agent =~ m@^Mozilla/4.@ && $user_agent !~ /MSIE/) {
        $::use_layers = 1;
    } elsif ($user_agent =~ m@MSIE (\d+)@) {
        $::use_dom = 1 if $1 >= 5;
        $::script_type = 'type="text/javascript"';
    } elsif ($user_agent =~ m@^Mozilla/(\d+)@) {
        $::use_dom = 1 if $1 >= 5;
        $::script_type = 'type="application/x-javascript"';
    }
}
1;

View File

@@ -1,62 +0,0 @@
This file contains only important changes made to Bonsai. If you
are updating from an older version, make sure that you check this file!
For a more complete list of what has changed, use Bonsai itself, at
(http://cvs-mirror.mozilla.org/webtools/bonsai/cvsqueryform.cgi) to
query the CVS tree. For example,
http://cvs-mirror.mozilla.org/webtools/bonsai/cvsquery.cgi?module=all&branch=HEAD&branchtype=match&dir=mozilla%2Fwebtools%2Fbonsai&file=&filetype=match&who=&whotype=match&sortby=Date&hours=2&date=week&mindate=&maxdate=&cvsroot=%2Fcvsroot
will tell you what has been changed in the last week.
11/9/99 I have discovered that Bonsai gets all screwed up if you have multiple
files with the same name but different capitalization in your directory. This
is because the tables were all defined to have case-independent strings, but
you want them to be case-dependent. To fix, feed the following to mysql:
alter table dirs change column dir dir varchar(128) binary not null;
alter table files change column file file varchar(128) binary not null;
alter table people change column who who varchar(32) binary not null;
alter table repositories change column repository repository varchar(64) binary not null;
alter table branches change column branch branch varchar(64) binary not null;
alter table checkins change column revision revision varchar(32) binary not null, change column stickytag stickytag varchar(255) binary not null;
alter table tags change column revision revision varchar(32) binary not null;
10/12/99 Apparently, newer alphas of MySQL won't allow you to have
"when" as a column name. So, I have had to rename a column in the
checkins table. You must feed the below to mysql or Bonsai won't
work at all.
alter table checkins change column when ci_when datetime not null;
7/9/99 Ported completely to perl! (Due to heroic efforts by Dieter
Weber <dieter@Compatible.COM>). Among the things you need to do to
get this to work are:
- Realize that this installation will clear the "hook", and will
prevent you from seeing any old hooks that were created by the old
TCL code.
- Create a treeconfig.pl, based on the tree data in your old
(now obsolete) configdata.
- Make sure your perl contains the MailDate and libnet CPAN modules
(see INSTALL for how to get these)
- Add a new column to the descs table (Dieter added this to speedup
database rebuilds). Feed this to mysql:
alter table descs add column hash bigint not null;
- Go visit the new editparams.cgi page, and adjust everything.
- Change your mail alias to point to the new handleCheckinMail.pl
script (instead of handleCheckinMail.tcl)
- If you use the "administrator mail" feature, change its mail alias to
point to the new handleAdminMail.pl (instead of handleAdminMail.tcl).
4/30/99 Now uses autoconf, and comes with a configure script. A few
new variables can be defined in your configdata file, and probably
need to be. See the file configdata.in for a list of the new parameters.

View File

@@ -1,437 +0,0 @@
# -*- mode: indented-text -*-
#
# Author: Artem Belevich <abelevic@ctron.com>
#
# (Changes have been made to Artem's original doc, as things evolve.)
#
#
**********************************************************************
As it's said in README "This is not very well packaged code. It's
not packaged at all. Don't come here expecting something you plop in
a directory, twiddle a few things, and you're off and using it. Much
work has to be done to get there."
This file is intended to make some things *easier* but not easy. You
are still required to make some changes on your own. There is no
guaranteed solution yet and it's unlikely that there will be one in
the nearest future.
**********************************************************************
0. OVERVIEW
This document describes how to install Bonsai and make it work with LXR.
If you are only installing Bonsai, you can ignore the mentions about LXR
and Tinderbox. You will still probably want to get registry.
Some time ago I've seen Linux Source Navigator (LSN) at
http://sunsite.unc.edu/linux-source. I was impressed.
It was and is a wonderful tool to explore Linux kernel source code.
Then Mozilla.org came up with a more elaborate tool that includes
source browser with crossreferencing (LXR http://lxr.linux.no) and CVS
tree control (Bonsai - http://www.mozilla.org/bonsai.html).
While LXR formatting is not as pretty as LSN's one, it has a huge
advantage - it lets you see where the identifier is defined and used.
And Bonsai brings nice and easy (though sometimes incompatible with
browsers other but Netscape's own) interface to the CVS history. This
includes getting list of changes, diffs between revisions, etc.
All in all LXR+Bonsai+other stuff beneath is a useful tool capable
of handling huge projects.
It's not that easy to make it work with other source tree but
Mozilla's own but it's possible. And there are a lot of things to
improve. Now I'm going to concentrate on the first goal - to make it
work.
1. GETTING IT UP
First of all you have to get all the tools in mozilla's
mozilla/webtools CVS repository. This includes lxr,bonsai,registry
and tinderbox. You will most likely not need tinderbox, but get
it just in case.
To get the sources you have to follow instructions on
http://www.mozilla.org/bonsai.html.
OK, now you've got the sources but don't rush to try it right
away. It's likely that you will not be able to even start most of
the scripts. There are more things you will have to get and install.
The short list of the things you will need:
1) MySQL database server.
2) Perl 5.004+ plus modules:
2a) Date::Parse
2b) Mail::Mailer
2c) DBI
2d) DBD::mysql
3) Some kind of HTTP server so you could use CGI scripts
You could try running the ./configure script to see what tools it
complains about right now. Mind you, it won't check for the MySQL
database.
1.1 Getting and setting up MySQL database
Visit MySQL homepage at http://www.mysql.com and grab the latest
stable binary release of the server. Sure, you can get sources and
compile them yourself, but binaries are the easiest and the fastest
way to get it up and running. Follow instructions found in
manual. There is a section about installing binary-only
distributions.
You should create database bonsai, and the user and password for it.
1.2 Perl + Mysql
You will need Perl 5.004 with DB and Mysql extensions.
DB is required to use LXR browser and crossreferencer for storing
its database. Mysql is used by Bonsai.
If you have Perl already installed, try to run genxref program from
LXR suite. If it complains that it misses DB terribly then you're
probably will have to get and install DB 1.86 distribution from one of the
CPAN (www.cpan.org) mirrors in src/misc directory. I personally got it
from http://www.cpan.org/src/misc/db.1.86.tar.gz. Having DB compiled
and installed you will also have to rebuild and reinstall Perl
itself so It would recognize and compile DB module in. This can be
tricky if you have DB installed in some strange place as I did.
I've got an error during linking phase - there was a function missing
in hash/ndbm.c file, so I just commented it out. It may potentially
cause troubles, but I think it does not matter in our case as this
was intended only for DBM compatibility - the feature we don't really
use.
Now you hopefully have Perl + DB compiled installed and working.
Time to set up Mysql module. This one is easy. Just follow
instructions in MySQL manual. You have to read manuals sometimes..
I think I'm getting older.. 8-)
Next step is to get TimeDate module from one of the CPAN mirrors.
Go to CPAN search page
(http://theory.uwinnipeg.ca/search/cpan-search.html) and search for
the "TimeDate" module. Then get it and install.
You also need to get the libnet and MailTools CPAN modules. They can
both be found on CPAN at CPAN/modules/by-authors/id/GBARR.
1.3 HTTP server
You have a freedom of choice here - Apache, Netscape or any other
server on UNIX would do. The only thing - to make configuration easier
you'd better run HTTP daemon on the same machine that you run MySQL
server on. Make sure that you can access 'bonsai' database with user
id you're running the daemon with.
Disable web access to the Bonsai data directory and its subdirectories.
In Apache you would write a <Directory> section in the config file, something
like:
<Directory /var/www/docs/bonsai/data>
AllowOverride None
Options None
Order deny,allow
Deny from All
</Directory>
2. TWEAKING THE TOOLS
Now you should have all necessary tools to be able to run LXR and
Bonsai scripts and see why they wouldn't work for you right now.
2.1 LXR
You can skip this section if you are not planning on installing LXR.
The first thing to set up is LXR tool. All it needs is the source
tree (not CVS tree). It's relatively easy and works almost right of
the box. Follow instructions in LXR README file.
Having set LXR you will see that regardless what your source tree
contains you will see that everything refers to it as Mozilla. Mozilla
is a great thing and this tool was primarily tailored to mozilla tree
but you'd like to control your own tree. The first step is to edit the
LXR files. Here is the short list of changes I had to make:
file: ident
1) change "&root=/cvsroot" to your CVSROOT path
2) change "file=/mozilla/" to the directory under CVSROOT where
your sources are. In my case it is just "/"
file: index.html
Nothing vital here but probably worth changing to reflect your own
environment
file: lxr.conf
Changes to this file are described in LXR README file and are
quite simple.
file: source
You may find it useful to uncomment "$img = "/icons/..." lines if
you use Explorer as it does not have internal-gopher-* images
built in. Actually Bonsai contains a lot of netscapism that will
make your IE4 unhappy anyway. You'd better stick with Netscape if
you are going to use LXR/Bonsai
file: template-*
Here you will probably want to watch closely at the places where
you see the word 'mozilla' near '.cgi'. There are a lot of
mozilla-specific paths hardcoded
change/get rid of banner that loads straight from mozilla.org that
may be very dangerous if you're working for micro$oft and your
boss comes by.. 8-)
2.2 Bonsai
This stuff sometimes gets very specific about your CVS repository
setup. You have to make a lot of changes until more portable
configuration mechanism is introduced.
These steps should create a basic Bonsai install:
./configure
make install
You might want to give the option --prefix=<path> to configure to
install Bonsai in another place than /usr/local, e.g. /var/www. It
will make a new directory named "bonsai" in the prefix directory you specify.
Ensure that the bonsai cgi programs can write and create files in the
data directory. Typically this means making the data directory owned by
the web cgi id. Bonsai does not need to change the executable files in the
main bonsai directory so these can be owned as root.
Test using your web browser that you will not be able to access the data
directory (you should get "access denied").
Edit data/treeconfig.pl file:
treeconfig.pl defines @::TreeList, a list of trees you
want to track, and %::TreeInfo, information about each of those
trees. A sample treeconfig.pl:
@::TreeList = ('default', 'other');
%::TreeInfo = (
default => {
branch => '',
description => 'My CVS repository',
module => 'All',
repository => '/d2/cvsroot',
shortdesc => 'Mine',
},
other => {
branch => '',
description => 'Other CVS repository',
module => 'All',
repository => '/d2/otherroot',
shortdesc => 'Other',
},
);
1;
Create data/XXX directory for each tree you defined in treeconfig
(data/default and data/other using the example above). This file maps the
names of trees to branch/module combinations. You will need to have at
least one module in your CVS repository to run Bonsai. Typically users
create a module called All which contains all the directories in the CVS
repository. All repositories must be written as if they were local
repositories (eg '/cvsroot') without hostnames or ':pserver:'.
The cgi-bin scripts will access these directories on the web machine and
they must contain the ',v' files which match cvsroot as listed in the
checkin mail from the real CVS machine.
Run createlegaldirs.pl to create legaldirs for your module. Using the
sample treeconfig above you would run createlegaldirs.pl like this:
perl createlegaldirs.pl default other
Go to the data directory and run
trapdoor <admin password here> >passwd
it will set up admin's password.
Bonsai should now be accessible via a web browser but not all
functionality is installed yet. Visit admin.cgi and set all the parameters.
That's basically it. With some luck and persistence you will have 90%
working system at this point. A lot of these things are just asking to be
fixed in the near future. And I hope they will be.
3. Setting up database
This is quite simple but time consuming operation.
First create database structure by running:
maketables.sh
Edit it to use the user and password you want for the bonsai database.
Set file permissions so that only the Bonsai administrator can run this
file (typically owner and group are set to root, and access to all but
owner denied).
You must ensure that your web machine can access the CVS repositories
raw data files (',v' files). If the CVS repository is on another
machine then the web machine must be configured to be able to read the
files as if they were stored with the same paths on the Web
machine. Usually this is accomplished via an NFS read only mount of the
cvsroot. You can check this configuration by looking at the file
$CVSROOT/modules,v (perhaps this needs the prefix trimmed from this
string to make a valid path name). This file should be readable on
both the CVS machine and on the web machine.
Then go to Bonsai administration page and press "Rebuild CVS history"
button. Then you may go to the theater and watch a movie or two. It
will take a lot of time. It takes several seconds to process one
file. The more revisions in file the more time it will take. My SUN
workstation with 2x200Mhz UltraSPARC processors run about an hour to
process about 4K files with 20K+ revisions. Your mileage may vary.
If you need to do this more than once you may wish to purge the
legaldirs file in the data directory. This is a cache file which
holds the names of the directories in CVS, if a directory is not
listed here it will not be loaded into the database. Changes to the
modules file should probably be followed by a deletion of the legaldirs
file.
I have also found it useful to rerun maketables.sh before reloading the
CVS information. If I forget to do this step occasionally the load
will fail in the middle because of duplicate data in the table.
Copy "dolog.pl" to your CVSROOT directory, and check it in.
Add "dolog.pl" to CVSROOT/checkoutlist, and check it in.
Then, add a line to your CVSROOT/loginfo file that says something like:
ALL $CVSROOT/CVSROOT/dolog.pl -r /cvsroot bonsai-checkin-daemon@my.bonsai.machine
Replace "/cvsroot" with the name of the CVS root directory, and
"my.bonsai.machine" with the name of the machine Bonsai runs on.
Now, on my.bonsai.machine, add a mail alias so that mail sent to
"bonsai-checkin-daemon" will get piped to handleCheckinMail.pl. The
first argument to handleCheckinMail.pl is the directory that bonsai
is installed in. E.g. in /etc/aliases, add
bonsai-checkin-daemon: "|/usr/local/bonsai/handleCheckinMail.pl /usr/local/bonsai"
or whatever is appropriate for your mail transport agent. Note that if
you are using smrsh with Sendmail, you will need to list handleCheckinMail.pl
in /etc/smrsh. For example:
cd /etc/smrsh
ln -s /usr/local/bonsai/handleCheckinMail.pl handleCheckinMail.pl
and change the bonsai-checkin-daemon in /etc/aliases to point to
/etc/smrsh/handleCheckinMail.pl
4. Registry
The Bonsai administrator interface will let you specify where the registry
tools are located relative to bonsai. The default is ../registry. Copy
the registry directory into this location.
One of the registry files has a hardcoded netscape.com domain name in it.
Open who.cgi in your favorite editor and change that as needed.
5. Things to do
a) There should be better way to track CVS tree changes. Now it's done
by making CVS send e-mail about each checkin. (See the comments at
the top of dolog.pl for some clues.) One alternative theory would be
to take advantage of the CVS history command, which provides
all necessary information to get the list of recently committed files, so
there is no need to send/process email. Just set up a cron job that
will periodically look for CVS tree changes and update database. On
the other hand, it's not at all clear how efficient the cvs history
command is for large, active repositories.
b) Better configuration. One should not hardcode CVS tree <-> Source
tree translations. Another thing to configure - banners.
c) LXR could be improved in a number of ways. Using MySQL database
instead of DB would probably be a good idea. It's unclear what impact
it will have on performance though. Incremental database updates would
be nice. It might also be nice to borrow syntax highlighting from LSN.
6. Conclusion.
OK. This may or may not work for you. But I hope you had a great
time trying. Or just reading.
Any suggestions/additions are welcome.
7. APPENDIX: Permissions
7.1 mySQL Permissions
If you have trouble with the database, make bonsai database
writable by all users on your machine and change access level
later. This could save you a lot of time trying to guess whether it's
permissions or a mistake in the script that make things fail.
7.2 File System Permissions
Some symptoms that may be caused by wrong file permissions: pages do not
show up, or they show up only partially; new checkins do not show up.
The bonsai installation directory needs to be accessible by the web server
process and mail process that runs handleCheckinMail.pl. These are typically
"apache" and "mail", respectively. make install will set permissions to allow
everybody access. Note that maketables.sh should only be available to Bonsai
administrator, and you must change this by hand!
Everything in the data directory and its subdirectories needs to be
accessible by the web server process. Some of the files will also need to
be accessible by the mail process. Some files will need to be accessible to
all. Below is a sample that is known to work:
drwxrwxrwx apache mail ./
-rw-rw-rw- apache apache batch-1.pl
-rw-rw---- apache mail batchid.pl
-rw-rw---- apache apache cachedstartdates
drwxrwx--- apache mail checkinlog/
-rw-rw---- apache mail cvsgraph.conf
drwxrw---- apache mail default/
-rw-rw---- apache mail hidelist
-rw-rw-rw- apache apache hooklist
-rw-rw---- apache mail legaldirs
-rw-rw-rw- apache apache lock
-rw-rw-rw- apache mail log
-rw-rw-rw- apache apache motd.pl
-rw-rw-rw- apache apache params
-rw-rw-r-- apache apache passwd
-rwxrw---- apache apache trapdoor*
-rw-rw---- apache mail treeconfig.pl
-rw-rw-rw- apache apache whiteboard
7.3 Disable web access to data directory
You should make it so that web users cannot browse the data directory
or read data meant only for administrators. In Apache you would
typically write the following section in http.conf and restart the server:
<Directory /var/www/html/bonsai/data>
AllowOverride None
Options None
Order deny,allow
Deny from All
</Directory>

View File

@@ -1,150 +0,0 @@
#!gmake
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
# This Makefile helps you install Bonsai. Define PERL to
# the full pathnames of where you have these utilities. Define PREFIX
# to where you will install the running Bonsai. Then "make install" should
# copy things for you.
# /usr/bin/perl
PERL = @PERL@
# /var/www/bonsai
PREFIX = @prefix@/bonsai
# RCS/CVS helper tool paths, substituted by configure.
CVS=@CVS@
RLOG=@RLOG@
CO=@CO@
RCSDIFF=@RCSDIFF@
CVSGRAPH=@CVSGRAPH@
# Every script and support file that "make install" copies into
# $(PREFIX), rewriting the perl shebang line on the way.
FILES = CGI.pl \
addcheckin.pl \
admin.cgi \
adminfuncs.pl \
adminmail.pl \
closemessage \
contacthelp.html \
countcheckins.cgi \
createlegaldirs.pl \
cvsblame.cgi \
cvsblame.pl \
cvsguess.cgi \
cvsgraph.cgi \
cvslog.cgi \
cvsquery.cgi \
cvsquery.pl \
cvsqueryform.cgi \
cvsregexp.html \
cvsview2.cgi \
defparams.pl \
doadmin.cgi \
doeditcheckin.cgi \
doeditmessage.cgi \
doeditparams.cgi \
doeditwhiteboard.cgi \
dolog.pl \
dotweak.cgi \
editcheckin.cgi \
editmessage.cgi \
editparams.cgi \
editwhiteboard.cgi \
get_line.pl \
globals.pl \
handleAdminMail.pl \
handleCheckinMail.pl \
index.html \
maketables.sh \
moduleanalyse.cgi \
modules.pl \
multidiff.cgi \
openmessage \
rebuildcvshistory.cgi \
repophook.cgi \
rview.cgi \
showcheckins.cgi \
switchtree.cgi \
toplevel.cgi \
viewold.cgi \
trapdoor
all: treeconfig.pl params
# treeconfig.pl starts as a copy of the template; the install rule only
# installs it into data/ when no customized copy exists yet (see below).
treeconfig.pl: treeconfig.pl.in
cp treeconfig.pl.in treeconfig.pl
# Substitute the configure-discovered tool paths into the initial
# parameters file.
params: params.in
sed -e s#_CVS_#$(CVS)#g \
-e s#_RLOG_#$(RLOG)#g \
-e s#_CO_#$(CO)#g \
-e s#_RCSDIFF_#$(RCSDIFF)#g \
-e s#_CVSGRAPH_#$(CVSGRAPH)#g \
$< >$@
# Copy each file into $(PREFIX) with the perl path rewritten, then
# install data files only when absent so an upgrade never clobbers an
# existing (possibly customized) configuration.
install: all
-mkdir -p $(PREFIX)
@for I in $(FILES); do \
echo Installing $$I && \
sed -e s#/usr/bin/perl#$(PERL)#g \
$$I > $(PREFIX)/$$I && \
chmod 755 $(PREFIX)/$$I; done
-mkdir -p $(PREFIX)/data && chmod 755 $(PREFIX)/data
cp bonsai.gif $(PREFIX)
chmod 755 $(PREFIX)/bonsai.gif
# put trapdoor into data
mv $(PREFIX)/trapdoor $(PREFIX)/data
@if test ! -r $(PREFIX)/data/treeconfig.pl ; then \
echo "Installing treeconfig.pl" && \
cp treeconfig.pl $(PREFIX)/data ; \
else \
echo ; \
echo "Not replacing existing treeconfig.pl" ; \
echo "Check treeconfig.pl in build directory for new features" ; \
fi
@if test ! -r $(PREFIX)/data/params ; then \
echo "Installing params" && \
cp params $(PREFIX)/data ; \
else \
echo ; \
echo "Not replacing existing params" ; \
fi
@if test ! -r $(PREFIX)/data/cvsgraph.conf ; then \
echo "Installing cvsgraph.conf" && \
cp cvsgraph.conf $(PREFIX)/data ; \
else \
echo ; \
echo "Not replacing existing cvsgraph.conf" ; \
fi
@echo
@echo "If you are updating an existing install, be sure to check"
@echo "editparams.cgi to see if there are any new things you should"
@echo "configure as this script will not overwrite your existing"
@echo "params file"
@echo
@echo "If you are installing a new Bonsai (not upgrading), you should"
@echo "run maketables.sh to create database tables, then customize the"
@echo "Bonsai configuration in $(PREFIX)/data/treeconfig.pl"
# Remove generated files; distclean also drops configure's droppings.
clean:
rm -f treeconfig.pl params
distclean: clean
rm -f config.cache config.log confdefs.h Makefile

View File

@@ -1,531 +0,0 @@
This is Bonsai. See <http://www.mozilla.org/bonsai.html>.
==========
DISCLAIMER
==========
This is not very well packaged code. It's not packaged at all. Don't
come here expecting something you plop in a directory, twiddle a few
things, and you're off and using it. Much work has to be done to get
there. We'd like to get there, but it wasn't clear when that would be,
and so we decided to let people see it first.
Don't believe for a minute that you can use this stuff without first
understanding most of the code.
Check out the INSTALL file for some guidance on getting started.
Many, many thanks to Artem Belevich <abelevic@ctron.com> for
trailblazing his way through this and writing down all the problems he
had.
============================
Configuration files you need
============================
Lots of configuration files need to be placed in the data subdir.
This is also where bonsai keeps its running state. These two things
ought to be split into different directories, but that hasn't happened
yet.
Some of these files are:
treeconfig.pl: some Perl source that defines @::TreeList, a list of trees you
want to track, and %::TreeInfo, information about each of those
trees.
params: This file contains many operating parameters. This can be
edited using the editparams.cgi webpage; you should probably
not edit it directly.
The ./configure script will make a guess at the parameters
that control paths for scripts to execute, and create an
initial params file for you. It looks for things on your
PATH, so if it complains, add the directories in which these
commands reside to your PATH, or override the path check, for
example:
setenv PERL /usr/local/lib/perl5
./configure
or for the Bourne shell:
PERL=/usr/local/lib/perl5 ./configure
hidelist: A list of regexps that define filenames that we don't want
to let people see via the bonsai pages. A common use is to
just have one line that says "CVSROOT". Note that the files
and directories will actually be visible, this just prevents
people from looking at their contents.
legaldirs: A list of directories to traverse when rebuilding the
history of the repository. This file is required to exist
for each module before you can start populating that module
with existing cvs data.
=================================
What's What in the Bonsai sources:
=================================
This is a rough first pass at cataloging and documenting the Bonsai
sources. Many hands have been in this code over the years, and it has
accreted wildly. There is probably quite a lot of dead code in here.
Makefile.in: "make install" lets you specify where you store
perl and bonsai on your system.
addcheckin.pl Perl. Add a checkin to a Bonsai hook. Determines
if the tree was open or closed at the time, shunts
checkin to proper tree.
admin.cgi Perl. Select from various administrative tasks
(which require a password.)
Called by: toplevel.cgi
Calls:
doadmin.cgi password=<text> treeid=<text>
command=[open|close]
closetimestamp=<time-text>
lastgood=<time-text>
doclear=<checkbox>
doadmin.cgi password=<text> treeid=<text>
command=tweaktimes
lastgood=<time-text>
lastclose=<time-text>
doadmin.cgi password=<text> treeid=<text>
command=editmotd
origmotd=<text>
motd=<text>
editmessage.cgi treeid=<text>
msgname=[openmessage|closemessage|
treeopened|treeopenedsamehook|
treeclosed]
#### note: no password?
repophook.cgi password=<text> treeid=<text>
command=repophook
startfrom=<time-text>
rebuildcvshistory.cgi password=<text>
treeid=<text>
command=rebuildcvs
startfrom=<time-text>
firstfile=<time-text>
subdir=<time-text>
doadmin.cgi password=<text> treeid=<text>
command=changepassword
password=<text>
newpassword=<text>
newpassword2=<text>
doglobal=<radio>
adminfuncs.pl Perl. Collection of functions to administrate a Bonsai
hook.
adminmail.pl Perl. Set of routines for opening and closing the
Bonsai hook based on receipt of e-mail.
bonsai.gif a bonsai tree.
closemessage HTML, text that gets sent to all people on the hook
when the tree is closed.
configure Configure script (generated from configure.in)
configure.in Configure.in script
contacthelp.html HTML, explanation of how to change someone's contact info
countcheckins.cgi Perl. Draws a graph of checkins for the various
Bonsai 'hooks'.
Called by: toplevel.cgi
Calls: nobody
createlegaldirs.pl Use this to create the 'legaldirs' file for a module.
Called by (via globals.pl LoadDirList):
addcheckin.pl
moduleanalyse.cgi
rebuildcvshistory.cgi
repophook.cgi
rview.cgi
cvsblame.cgi Runs through a CVS file and tells you who changed what.
Calls:
rview.cgi dir= cvsroot= rev=
cvsblame.cgi file= rev= root= mark=
cvsblame.cgi set_line= (cookie magic?)
cvsblame.cgi root= file= rev= use_html=
cvsgraph.cgi file=
cvsview2.cgi subdir= files= rev=
cvsview2.cgi root= subdir= files= rev1= rev2=
cvsqueryform.cgi
Called by:
cvsgraph.cgi
cvsguess.cgi
cvslog.cgi
cvsview2.cgi
moduleanalyse.cgi
cvsblame.pl Runs through a CVS file and tells you who changed what.
Called by:
cvsblame.cgi
cvslog.cgi
Calls: nobody
cvsguess.cgi Given a file name, try to figure out what directory
it's in. then link to cvsblame.cgi. parameters are
the same.
Seems to take an exact file name (sans directory),
then do a redirect to cvsblame.cgi. If there are
more than one file of that name, it presents a list.
This is (I think) redundant with LXR's file name
search.
Calls:
cvsblame.cgi file= rev= mark= #
Called by: *tinderbox
cvsindex.pl ??? DELETE
cvslog.cgi Web interface to "cvs log".
Calls:
rview.cgi dir= cvsroot= rev=
cvslog.cgi file= root= rev=
sort=[revision|date|author]
author=
cvsview2.cgi
command=DIFF_FRAMESET
diff_mode=context
whitespace_mode=show
root= subdir= file=
rev1= rev2=
cvsview2.cgi
command=DIRECTORY
subdir= files= root= branch=
Used to call:
cvsblame.cgi file= rev= root=
Called by:
cvsgraph.cgi
cvsblame.cgi
cvslog.cgi
cvsmenu.pl ??? DELETE
cvsquery.cgi Displays the results of a query entered in cvsqueryform
Called by:
cvsqueryform.cgi
Calls:
cvsqueryform.cgi
cvsview2 command=DIRECTORY
subdir= files= branch= root=
cvsview2.cgi command=DIFF_FRAMESET
diff_mode=context
whitespace_mode=show
subdir= file= rev1= rev2= root=
multidiff.cgi name=allchanges cvsroot=
cvsquery.cgi sortby=
../registry/who.cgi email=
http://scopus.mcom.com/bugsplat/show_bug.cgi
cvsquery.pl Actual query functions used by cvsquery.cgi
Called by:
cvsquery.cgi
cvsqueryform.cgi Main screen to let you query the CVS database.
Called by:
cvsblame.cgi
cvslog.cgi
cvsquery.cgi
toplevel.cgi
Calls:
cvsregexp.html
cvsquery.cgi
module=[all|allrepositories|?]
branch=
branchtype=[match|regexp]
directory=<text>
file=<text>
who=<text>
whotype=[match|regexp]
sortby=[Date|Who|File|Change Size]
date=[hours|day|week|month|all|
explicit]
hours=
mindate=
maxdate=
cvsroot=
cvsregexp.html Description of MySQL regular expression syntax
cvsview2.cgi Lets you view CVS diffs.
Called by:
cvsblame.cgi
cvslog.cgi
cvsquery.cgi
show2.cgi
showcheckins.cgi
Calls:
rview.cgi dir= cvsroot= rev=
cvsview2.cgi subdir= command=DIFF
root= file= rev1= rev2=
cvsview2.cgi subdir= command=DIFF_LINKS
root= file= rev1= rev2=
cvsview2.cgi subdir= command=DIFF
root= file= rev1= rev2= #
cvsview2.cgi subdir= command=DIFF_FRAMESET
root= file= rev1= rev2=
cvsview2.cgi subdir= command=DIRECTORY
root= files= branch= skip=
cvsview2.cgi subdir= command=LOG
root= file= rev=
doadmin.cgi Perl. Executes admin things asked for in admin.cgi
Called by:
admin.cgi
Calls:
mailto:clienteng
doeditcheckin.cgi Perl. Edits a checkin on the hook.
Called by:
editcheckin.cgi
Calls:
nobody
doeditmessage.cgi Perl. Edits one of the email messages that bonsai sends
people.
Called by:
editmessage.cgi
Calls:
nobody
doeditprofile.cgi Perl. Edit people's contact info. Left-over code from
before we started getting this info from LDAP.
Called by:
editprofile.cgi
Calls:
nobody
doeditwhiteboard.cgi Perl. Edits the free-for-all whiteboard.
Called by:
editwhiteboard.cgi
Calls:
nobody
dolog.pl Perl. Magic file that causes CVS to send mail to
Bonsai whenever someone makes a change. Please read
the comments towards the beginning for more clues.
dotweak.cgi Perl. Tweaks a bunch of checkins in ahook at once.
Called by:
show2.cgi
showcheckins.cgi
Calls:
nobody
editcheckin.cgi Perl. Edits a checkin on the hook.
Called by:
show2.cgi
showcheckins.cgi
Calls:
doeditcheckin.cgi
editmessage.cgi Perl. Edits one of the email messages that bonsai sends
people.
Called by:
admin.cgi
Calls:
doeditmessage.cgi
editprofile.cgi Perl. Edit people's contact info. Left-over code from
before we started getting this info from LDAP.
Called by:
localprofile.cgi
Calls:
doeditprofile.cgi
editwhiteboard.cgi Perl. Edits the free-for-all whiteboard.
Called by:
toplevel.cgi
Calls:
doeditwhiteboard.cgi
get_line.pl Provides line parsing function, get_line
Calls: nobody
Called by:
cvsquery.pl
modules.pl
globals.pl Common functions used by various scripts.
handleAdminMail.pl Perl. Mail is piped to this script and parsed.
Calls:
adminfuncs.pl
handleCheckinMail.pl Perl. Mail is piped to this script and parsed. It
then adds a checkin to a Bonsai hook.
header.pl ??? DELETE
index.html loads cvsqueryform.cgi
indextest.pl ??? DELETE
lloydcgi.pl parses CGI args from $QUERY_STRING and leaves them
in $form{$key}; and puts cookies in %cookie_jar.
Calls: nobody
Called by: whohastouchedwhat.cgi
maketables.sh Creates sql database & tables used by bonsai.
Called by:
nobody
moduleanalyse.cgi Shows the directories in a module.
Called by:
nobody
Calls:
moduleanalyse.cgi module=[all|?] cvsroot=
rview.cgi dir= cvsroot=
cvsblame.cgi file= root=
modules.pl Populates $::modules{} with list of CVS modules
from $cvsroot/CVSROOT/modules.
Called by:
cvsqueryform.cgi
multidiff.cgi Implements the "Show me ALL the Diffs" button
Called by:
cvsquery.cgi
show2.cgi
showcheckins.cgi
Calls:
nobody
openmessage Mail template that gets sent to people when they first
check into the tree
processqueue.pl Pipes data/queue files to dolog.pl. DELETE
rebuildcvshistory.cgi Perl. Admin script to go rebuild the bonsai database
from CVS.
Called by:
admin.cgi
Calls:
nobody
repophook.cgi Perl. Rebuilds a bonsai hook from the bonsai database.
Called by:
admin.cgi
Calls:
nobody
reposfiles.pl Generates a list of all files in the repository.
DELETE
rview.cgi Lets you browse a directory in a CVS repository.
Called by:
cvsblame.cgi
cvslog.cgi
cvsview2.cgi
moduleanalyse.cgi
Calls:
rview.cgi dir= cvsroot= rev=
rview.cgi dir= cvsroot= rev= ?=chdir
rview.cgi dir= cvsroot= rev= ?=Set Branch
../registry/file.cgi cvsroot= file= dir=
showcheckins.cgi Perl. Shows some set of checkins in a bonsai hook.
Called by:
admin.cgi
show2.cgi
toplevel.cgi
Calls:
dotweak.cgi
showcheckins.cgi [various funky args]
editcheckin.cgi id= [various funky args]w
http://phonebook/ds/dosearch/phonebook/...
cvsview2.cgi root= subdir= files=
command=DIRECTORY branch=
http://w3/cgi/cvsview2.cgi subdir= files=
command=DIRECTORY
multidiff.cgi allchanges=
switchtree.cgi Perl. Lets you choose a different bonsai branch.
Called by:
toplevel.cgi
Calls:
nobody
testlock.pl Tests the Un/Lock functionality of utils.pl. DELETE
toplevel.cgi Perl. Main interface to the bonsai hook.
Called by:
CGI.pl
contacthelp.html
index.html
sheriff2.html
switchtree.cgi
toplevel.cgi
viewold.cgi
Calls:
editwhiteboard.cgi [...]
http://phonebook/ds/dosearch/phonebook/...
showcheckins.cgi
http://warp/tinderbox/showbuilds.cgi
switchtree.cgi [...]
news:mcom.dev.client.build.busted
http://phonebook/
viewold.cgi [...]
countcheckins.cgi [...]
admin.cgi [...]
index.html
http://warp/client/dogbert/tree.html
contacthelp.html
http://warp/client/dogbert/buildlore/index.html
trapdoor Runs crypt on passwd
utils.pl Ancient globals.pl. DELETE
Called by: testlock.pl whohastouchedwhat.cgi
viewold.cgi Perl. Lets you choose an old bonsai hook to view.
Called by:
toplevel.cgi
Calls:
toplevel.cgi treeid=
=================
Glossary of terms
=================
Here are some funky terms you may find here and there:
Hook The 'hook' is actually the oldest part of the Bonsai
code. The idea is, every so often (at Netscape, it was
every day), some build engineers will close the tree
and make sure that everything still builds properly.
If it doesn't, then the build engineers want to have a
list of people they can go beat up, this being the list
of people who changed the tree since the last time they
successfully built the tree. Those people are "on the
hook"; they are held responsible for any probs that
arise.
So, it works out to: the list of people who have
checked in since the tree was last closed.
==========
Maintainer
==========
The current primary maintainer of Bonsai is Tara Hernandez <tara@tequilarista.org>.

View File

@@ -1,244 +0,0 @@
#!/usr/bin/perl -w
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
# SourceChecker.cgi -- tools for creating or modifying the dictionary
# used by cvsblame.cgi.
#
# Created: Scott Collins <scc@netscape.com>, 4 Feb 1998.
#
# Arguments (passes via GET or POST):
# ...
#
use strict;
use CGI;
use SourceChecker;
#
# Global
#
# The single CGI object used throughout this script for parameter
# access and HTML generation.  Direct method-call syntax replaces the
# original indirect-object form ("new CGI"), which Perl can misparse
# when a subroutine of the same name is in scope.
my $query = CGI->new;
#
# Subroutines
#
sub print_page_header()
{
    # Emit the static page heading (original spelling preserved).
    print "<H1>SourceChecker Dictionary Maintainance</H1>\n";
}
sub print_page_trailer()
{
    # Emit the static page footer: rule, last-updated note, maintenance
    # link, and feedback mailto (all text preserved byte-for-byte).
    my $footer = join "\n",
        '<HR>',
        '<FONT SIZE=-1>',
        'Last updated 5 Feb 1998.',
        '<A HREF="SourceChecker.cgi">Dictionary maintainance and help</A>.</FONT>',
        'Mail feedback to <A HREF="mailto:scc?subject=[SourceChecker.cgi]">&lt;scc@netscape.com&gt;</A>.';
    print "$footer\n";
}
# Section heading shown once, immediately before the first error only;
# cleared after its first use.
my $error_header = '<HR><H2>I couldn\'t process your request...</H2>';
sub print_error($)
{
    # Print one error paragraph, prefixing the one-shot section heading.
    my $text = shift;
    print $error_header . "<P><EM>Error</EM>: $text</P>";
    $error_header = '';
}
sub print_query_building_form()
{
    # Render the multipart upload form used to build a dictionary
    # create/modify request.  Output sequence is identical to the
    # original hand-unrolled version.
    print $query->start_multipart_form;
    print '<HR><H2>Build a new request</H2>';
    print '<P>...to modify or create a remote dictionary with words from one or more local files.</P>';
    print '<H3>Files on the server</H3>';
    print '<P>...i.e., the dictionary to be created or modified.</P>';
    print $query->textfield( -name     => 'dictionary',
                             -default  => '',
                             -override => 1,
                             -size     => 30 );
    print '-- the path to dictionary.';
    print '<H3>Files on your local machine</H3>';
    print '<P>...that will be uploaded to the server, so their contents can be added to the dictionary.</P>';
    print '<BR>';
    # One upload widget per word-list category: [field name, caption].
    my @uploads = (
        [ 'ignore_english', '-- contains english (i.e., transformable) words to ignore.' ],
        [ 'ignore_strings', '-- contains identifiers (i.e., non-transformable) words to ignore.' ],
        [ 'flag_strings',   '-- contains identifiers words to be flagged.' ],
        [ 'ignore_names',   '-- contains user names to be ignored.' ],
    );
    foreach my $upload ( @uploads )
    {
        my ($field_name, $caption) = @$upload;
        print $query->filefield( -name => $field_name, -size => 30 );
        print $caption;
        print '<BR>';
    }
    print $query->submit;
    print $query->endform;
}
sub do_add_good_words($)
{
    # Feed every line of the uploaded filehandle to add_good_words,
    # registering non-transformable identifiers to ignore.  Any line
    # containing a '#' anywhere is skipped as a comment.
    my $input = shift;
    while ( my $line = <$input> )
    {
        next if $line =~ /\#/;
        add_good_words($line);
    }
}
sub do_add_bad_words($)
{
    # Feed every line of the uploaded filehandle to add_bad_words,
    # registering identifiers that should be flagged.  Any line
    # containing a '#' anywhere is skipped as a comment.
    my $input = shift;
    while ( my $line = <$input> )
    {
        next if $line =~ /\#/;
        add_bad_words($line);
    }
}
sub do_add_good_english($)
{
    # Feed every line of the uploaded filehandle to add_good_english,
    # registering transformable (English) words to ignore.  Any line
    # containing a '#' anywhere is skipped as a comment.
    my $input = shift;
    while ( my $line = <$input> )
    {
        next if $line =~ /\#/;
        add_good_english($line);
    }
}
sub do_add_names($)
{
    # Feed every line of the uploaded filehandle to add_names,
    # registering user names to be ignored.  Any line containing a '#'
    # anywhere is skipped as a comment.
    my $input = shift;
    while ( my $line = <$input> )
    {
        next if $line =~ /\#/;
        add_names($line);
    }
}
sub handle_query()
{
    # Process a submitted form: open (or create) the dbm dictionary named
    # by the 'dictionary' parameter, then merge in every uploaded word
    # list.  Reports errors via print_error.
    my $dictionary_path = $query->param('dictionary');
    if ( ! $dictionary_path )
    {
        print_error('You didn\'t supply a path to the dictionary file.');
        return;
    }
    # BUG FIX: the original used '||', which bound to the 0666 mode
    # argument (a true constant) instead of dbmopen's return value, so a
    # failed open was silently ignored and the script went on to add
    # words to an unopened dictionary.  'or' has the intended low
    # precedence; bail out because nothing useful can happen without it.
    dbmopen( %SourceChecker::token_dictionary, "$dictionary_path", 0666 )
        or do {
            print_error("The dictionary you named could not be opened.");
            return;
        };
    my $added_some_words = 0;
    # Each upload parameter, when present, is a CGI.pm file-upload handle.
    my ($file_of_good_english, $file_of_good_words,
        $file_of_bad_words, $file_of_names);
    if ( $file_of_good_english = $query->param('ignore_english') )
    {
        do_add_good_english($file_of_good_english);
        $added_some_words = 1;
    }
    if ( $file_of_good_words = $query->param('ignore_strings') )
    {
        do_add_good_words($file_of_good_words);
        $added_some_words = 1;
    }
    if ( $file_of_bad_words = $query->param('flag_strings') )
    {
        do_add_bad_words($file_of_bad_words);
        $added_some_words = 1;
    }
    if ( $file_of_names = $query->param('ignore_names') )
    {
        do_add_names($file_of_names);
        $added_some_words = 1;
    }
    if ( ! $added_some_words )
    {
        print_error("You did not supply any words to add to the dictionary.");
    }
    dbmclose %SourceChecker::token_dictionary;
}
#
# The main script
#
# Emit the HTTP header and HTML prologue, process a submitted request
# if any parameters were posted, then always re-display the
# request-building form and the page footer.
print $query->header;
print $query->start_html(-title=>'SourceChecker Dictionary Maintainance',
-author=>'scc@netscape.com');
print_page_header();
# Only attempt dictionary work when the form was actually submitted.
if ( $query->param )
{
handle_query();
}
print_query_building_form();
print_page_trailer();
print $query->end_html;
__DATA__

View File

@@ -1,195 +0,0 @@
# -*- Mode: perl; tab-width: 4; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
package SourceChecker;
# Dictionary-backed token classifier: lines are split into tokens,
# looked up in %token_dictionary, and (in markup_line) wrapped in
# colored HTML by classification.
require Exporter;
@ISA = qw(Exporter);
@EXPORT = qw(%token_dictionary add_good_english add_good_words add_bad_words add_names tokenize_line markup_line);
@EXPORT_OK = qw($GOOD_TOKEN $UNKNOWN_TOKEN $BAD_TOKEN $NAME_TOKEN add_token canonical_token @markup_prefix @markup_suffix);
# Token kinds are scalar *references* to numbers; callers dereference
# them (e.g. $$GOOD_TOKEN) to get the numeric value stored as the
# dictionary entry.  add_token keeps the numerically largest kind seen.
$GOOD_TOKEN = \-1;
$UNKNOWN_TOKEN = \0;
$NAME_TOKEN = \1;
$BAD_TOKEN = \2;
# HTML wrappers indexed by token kind 0..2; kind -1 (good) is never
# wrapped because markup_line only marks up kinds >= 0.
@markup_prefix = ('<FONT COLOR="green">', '<FONT COLOR="red">', '<FONT COLOR="blue">');
@markup_suffix = ('</FONT>', '</FONT>', '</FONT>');
sub canonical_token($)
{
    # Normalize a raw token: strip apostrophe-like characters and
    # ampersands, then fold to lower case.  Tokens of two or fewer
    # characters (after stripping), or undef input, yield undef.
    my $word = shift;
    return $word unless defined $word;
    $word =~ s/[\'Õ\&]+//g;
    return undef if length($word) <= 2;
    return lc $word;
}
sub _push_tokens($$)
{
# Append a (raw phrase, canonical token) pair to the caller's
# dynamically scoped result arrays; the token slot is undef for
# non-word phrases.
# Note: inherits |@exploded_phrases| and |@exploded_tokens| from caller(s)
push @exploded_phrases, shift;
push @exploded_tokens, canonical_token(shift);
}
sub _explode_line($)
{
# Break $line into alternating non-word and word phrases, pushing each
# phrase (plus a canonical token for word phrases) via _push_tokens.
# Word phrases are additionally split at CamelCase-style boundaries.
# Note: inherits (and returns results into) |@exploded_phrases| and |@exploded_tokens| from caller(s)
my $line = shift;
my $between_tokens = 0;
# split with a capturing group interleaves separator text with the
# captured matches, starting with the (possibly empty) leading
# separator, so the toggled flag starts each pass on non-word text.
foreach $phrase ( split /([A-Za-z\'Õ\&]+)/, $line )
{
if ( $between_tokens = !$between_tokens )
{
# Non-word text: keep the phrase for markup, but no token.
_push_tokens($phrase, undef);
next;
}
# Peel one capitals-then-lowercase run off the word phrase per pass,
# leaving the remainder in $_ for the next iteration.
for ( $_ = $phrase; $_; )
{
m/^[A-Z\'Õ\&]*[a-z\'Õ\&]*/;
$token = $&;
$_ = $';
# An all-caps prefix followed by a Capitalized sub-word is emitted
# as a separate token; $` is the prefix before the inner match.
if ( ($token =~ m/[A-Z][a-z\'Õ]+/) && $` )
{
$token = $&;
_push_tokens($`, $`);
}
_push_tokens($token, $token);
}
}
# Last expression: index of the final phrase pushed so far.
$#exploded_phrases;
}
sub tokenize_line($)
{
# Explode $line and return only the defined canonical tokens, by
# compacting @exploded_tokens in place.
my $line = shift;
local @exploded_tokens;
# NOTE(review): @exploded_phrases is NOT localised here, so
# _explode_line appends to the package-level array on every call;
# nothing visible in this file reads that leaked copy, but confirm
# before relying on it.
_explode_line($line);
my $i = -1;
# Shift each defined token toward the front; the write index never
# passes the read position, so aliasing during foreach is safe here.
foreach $token ( @exploded_tokens )
{
$exploded_tokens[++$i] = $token if defined $token;
}
# Truncate to the compacted length (empty list if no tokens survived).
$#exploded_tokens = $i;
@exploded_tokens;
}
sub markup_line($)
{
    # HTML-escape |$line| and wrap each recognized token in the colored
    # <FONT> markup for its kind (see @markup_prefix/@markup_suffix).
    # GOOD tokens (kind -1) are deliberately left unmarked.
    my $line = shift;
    local @exploded_phrases;
    local @exploded_tokens;
    _explode_line($line);
    # Fix: |$i| and |$token_kind| were undeclared package globals; both
    # are now lexically scoped.
    my $i = 0;
    foreach $phrase ( @exploded_phrases )
    {
    $phrase =~ s/&/&amp;/g;
    $phrase =~ s/</&lt;/g;
    $phrase =~ s/>/&gt;/g;
    my $token = $exploded_tokens[$i];
    if ( defined $token )
    {
        # Tokens absent from the dictionary were already treated as
        # UNKNOWN (0), but via an uninitialized-value comparison under
        # -w; make the default explicit instead.
        my $token_kind = defined $token_dictionary{$token}
                             ? $token_dictionary{$token} : 0;
        if ( $token_kind >= 0 )
        {
            $phrase = $markup_prefix[$token_kind] . $phrase . $markup_suffix[$token_kind];
        }
    }
    ++$i;
    }
    join '', @exploded_phrases;
}
sub add_token($$)
{
    # Record |$token| in %token_dictionary under classification |$kind|.
    # A higher kind value (e.g. BAD = 2) overrides a lower one; a lower
    # one never downgrades an existing entry.
    my ($token, $kind) = @_;
    my $existing = $token_dictionary{$token};
    $token_dictionary{$token} = $kind
        if !defined $existing || $kind > $existing;
}
sub add_good_english($)
{
    # Register every token on |$line| as GOOD, and additionally register a
    # disemvoweled abbreviation of each word (first letter kept, interior
    # vowels removed, e.g. "number" -> "nmbr"), since such abbreviations
    # are common in identifiers.
    my $line = shift;
    foreach $token ( tokenize_line($line) )
    {
    add_token($token, $$GOOD_TOKEN);
    my $initial_char = substr($token, 0, 1);
    (my $remainder = substr($token, 1)) =~ s/[aeiouy]+//g;
    # Fix: was an undeclared package global; now lexically scoped.
    my $abbreviated_length = length($remainder) + 1;
    # Only add the abbreviation when it differs from the word and is
    # still long enough to be a meaningful token (> 2 chars).
    if ( $abbreviated_length != length($token) && $abbreviated_length > 2 )
    {
        add_token("$initial_char$remainder", $$GOOD_TOKEN);
    }
    }
}
sub _add_tokens($$)
{
    # Tokenize |$line| and file every resulting token under the given
    # classification |$token_kind|.
    my ($line, $token_kind) = @_;
    for my $word ( tokenize_line($line) )
    {
        add_token($word, $token_kind);
    }
}
sub add_good_words($)
{
    # Mark every token on the line as a known-good word.
    my $line = shift;
    _add_tokens($line, $$GOOD_TOKEN);
}
sub add_bad_words($)
{
    # Mark every token on the line as a known-bad word.
    my $line = shift;
    _add_tokens($line, $$BAD_TOKEN);
}
sub add_names($)
{
    # Mark every token on the line as a proper name.
    my $line = shift;
    _add_tokens($line, $$NAME_TOKEN);
}
1;

View File

@@ -1,36 +0,0 @@
dnl -*- Mode: m4; tab-width: 4; indent-tabs-mode: nil; -*-
dnl autoconf tests for bonsai
dnl AC_CHECK_PERL_MODULE(MODULE, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl Check (and cache, as ac_cv_perl_<module>) whether perl can load MODULE
dnl by compile-checking "use MODULE;". ':' in the module name is mapped to
dnl '_' for the cache-variable name.
AC_DEFUN(AC_CHECK_PERL_MODULE,
[ AC_MSG_CHECKING("for perl $1...")
ac_mod_name=`echo $1 | tr ':' '_'`
AC_CACHE_VAL(ac_cv_perl_$ac_mod_name,
[ $PERL -w -c -e "use $1;" 2>/dev/null
ac_has_mod=$?
if test "$ac_has_mod" = "0"; then
eval "ac_cv_perl_$ac_mod_name=yes"
else
eval "ac_cv_perl_$ac_mod_name=no"
fi
])
if eval "test \"`echo '$ac_cv_perl_'$ac_mod_name`\" = yes"; then
AC_MSG_RESULT(yes)
ifelse([$2], , :, [$2])
else
AC_MSG_RESULT(no)
ifelse([$3], , , [$3
])dnl
fi
])
dnl AC_CHECK_PERL_MODULES(MODULE-LIST, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl Check each module in the whitespace-separated MODULE-LIST and define
dnl HAVE_<MODULE> (uppercased, with ':' mapped to '_') for each one found.
dnl Bug fix: the symbol name was computed from $ac_func, which is never set
dnl in this loop (copy-paste from the AC_CHECK_FUNCS idiom), so every module
dnl defined the bare symbol "HAVE_". It now uses the loop variable $ac_mod.
AC_DEFUN(AC_CHECK_PERL_MODULES,
[for ac_mod in $1; do
AC_CHECK_PERL_MODULE($ac_mod,
[ changequote(, )dnl
ac_tr_mod=HAVE_`echo $ac_mod | tr 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' | tr ':' '_'`
changequote([, ])dnl
AC_DEFINE_UNQUOTED($ac_tr_mod) $2], $3)dnl
done
])

View File

@@ -1,249 +0,0 @@
#!/usr/bin/perl -w
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
require 'globals.pl';
use vars qw($BatchID @TreeList @LegalDirs);
use File::Path;
# globals.pl must not have preloaded any checkins; this script populates
# @::CheckInList itself below.
if (@::CheckInList) {
die '@::CheckInList is valid ?!?';
}
# Parser state for the specially-formatted checkin-mail input files.
my $inheader = 1;      # still inside the mail headers
my $foundlogline = 0;  # saw the LOGCOMMENT marker
my @filelist = ();     # NOTE(review): appears unused in this script -- verify
my $log = '';          # accumulated log comment
my $appendjunk = '';   # raw "|"-separated checkin records, replayed to disk
my $repository = pickDefaultRepository();
my %group = ();        # checkin file-lines grouped by date|branch|repo|dir|user
my $forcetreeid = '';
my ($chtype, $date, $name, $dir, $file);
my ($version, $sticky, $branch, $addlines, $removelines);
my ($key, $junk, $tagtime, $tagname, @data);
my ($mungedname, $filename, @treestocheck);
my (@files, @fullinfo, $i, $okdir, $f, $full, $d, $info, $id);
my ($mail, %substs, %headers, $body);
# Optional leading "-treeid <id>" limits processing to a single tree and
# skips the checkin-log/database bookkeeping below.
if (($#ARGV >= 1) && ($ARGV[0] eq '-treeid')) {
$forcetreeid = $ARGV[1];
shift; shift;
}
# Read in from remaining file arguments
# Each input file is one checkin-mail message: headers, a blank line,
# "|"-separated checkin and Tag records, then the log comment between
# LOGCOMMENT and :ENDLOGCOMMENT.
DATAFILE:
for ( ; $#ARGV >= 0; shift) {
next DATAFILE
unless (open(FILE, $ARGV[0]));
LINE:
while (<FILE>) {
my $line = $_;
chop($line);
$line = trim($line);
if ($inheader) {
# Skip mail headers; a blank line ends them.
$inheader = 0 if ($line =~ /^$/);
next LINE;
}
unless ($foundlogline) {
if ($line =~ /^.\|/) {
# One per-file checkin record; group by date/branch/repo/dir/user.
$appendjunk .= "$line\n";
($chtype, $date, $name, $repository, $dir, $file,
$version, $sticky, $branch, $addlines, $removelines) =
split(/\|/, $line);
$addlines = 0 if (!defined($addlines) ||
$addlines =~ /^\s*$/);
$removelines = 0 if (!defined($removelines) ||
$removelines =~ /^\s*$/);
$key = "$date|$branch|$repository|$dir|$name";
$group{$key} .=
"$file|$version|$addlines|$removelines|$sticky\n";
} elsif ($line =~ /^Tag\|/) {
# Tag record: append it to the per-repository taginfo file.
($junk, $repository, $tagtime, $tagname, @data) =
split(/\|/, $line);
($mungedname = $repository) =~ s!/!_!g;
$filename = "data/taginfo/$mungedname/" .
MungeTagName($tagname);
Lock();
unless (-d "data/taginfo/$mungedname") {
mkpath(["data/taginfo/$mungedname"], 1, 0777);
}
if (open(TAGFILE, ">> $filename")) {
print TAGFILE "$tagtime|" . join('|', @data) . "\n";
close(TAGFILE);
chmod(0666, $filename);
}
Unlock();
} elsif ($line =~ /^LOGCOMMENT/) {
$foundlogline = 1;
}
next LINE;
}
last LINE if ($line eq ":ENDLOGCOMMENT");
$log .= "$line\n";
}
close(FILE);
# unlink($ARGV[0]);
my $plainlog = $log;
$log = MarkUpText(html_quote(trim($log)));
next DATAFILE unless ($plainlog && $appendjunk);
Lock();
LoadTreeConfig();
unless ($forcetreeid) {
# Replay the raw records into the per-repository checkin log and the
# database (skipped entirely when run with -treeid).
($mungedname = $repository) =~ s!/!_!g;
$mungedname =~ s!^_!!;
$filename = "data/checkinlog/$mungedname";
unless (-d "data/checkinlog") {
mkpath(["data/checkinlog"], 1, 0777);
}
if (open(TID, ">> $filename")) {
print TID "${appendjunk}LOGCOMMENT\n$plainlog:ENDLOGCOMMENT\n";
close(TID);
chmod(0666, $filename);
}
ConnectToDatabase();
AddToDatabase($appendjunk, $plainlog);
DisconnectFromDatabase(); # Minimize time connected to the DB, and
# only do it while Lock()'d. That way,
# zillions of addcheckin processes can't
# lock up mysqld.
@treestocheck = @::TreeList;
}
Unlock();
@treestocheck = ($forcetreeid) if $forcetreeid;
# Fan each grouped checkin out to every tree whose branch/repository
# matches and whose legal-dirs list covers at least one changed file.
foreach $key (keys(%group)) {
($date, $branch, $repository, $dir, $name) = split(/\|/, $key);
@files = ();
@fullinfo = ();
foreach $i (split(/\n/, $group{$key})) {
($file, $version, $addlines, $removelines) = split(/\|/, $i);
push @files, $file;
push @fullinfo, $i;
}
TREE:
foreach $::TreeID (@treestocheck) {
next TREE if exists($::TreeInfo{$::TreeID}{nobonsai});
next TREE
unless ($branch =~ /^.?$::TreeInfo{$::TreeID}{branch}$/);
next TREE
unless ($repository eq $::TreeInfo{$::TreeID}{repository});
LoadDirList();
$okdir = 0;
FILE:
foreach $f (@files) {
$full = "$dir/$f";
LEGALDIR:
foreach $d (sort( grep(!/\*$/, @::LegalDirs))) {
$d =~ s@^[\.]/@@;
if ($d eq "\." || $d eq "/" || $full =~ m!^$d\b/!) {
$okdir = 1;
last LEGALDIR;
}
}
last FILE if $okdir;
}
next TREE unless $okdir;
# Record the checkin on this tree's hook. The checkin lives in a
# package hash named after $id, reached via a symbolic reference.
Lock();
undef $::BatchID;
undef @::CheckInList;
LoadCheckins();
$id = "::checkin_${date}_$$";
push @::CheckInList, $id;
$info = eval("\\\%$id");
%$info = (
person => $name,
date => $date,
dir => $dir,
files => join('!NeXt!', @files),
'log' => $log,
treeopen => $::TreeOpen,
fullinfo => join('!NeXt!', @fullinfo)
);
WriteCheckins();
Log("Added checkin $name $dir " . join(' + ', @files));
Unlock();
# Pick the notification template: open-tree mail is suppressed when
# this person already has another checkin on the hook.
if ($::TreeOpen) {
$filename = DataDir() . "/openmessage";
foreach $i (@::CheckInList) {
$filename = "this file doesn't exist"
# XXX verify...
if ((eval("\$$i" . "{person}") eq $name) &&
($i ne $id));
}
} else {
$filename = DataDir() . "/closemessage";
}
if (!$forcetreeid && -f $filename && open(MAIL, "$filename")) {
$mail = join("", <MAIL>);
close(MAIL);
%substs = (
profile => GenerateProfileHTML($name),
nextclose => "We don't remember close " .
"times any more...",
name => EmailFromUsername($name),
dir => $dir,
files => join(',', @files),
'log' => $log,
);
$mail = PerformSubsts($mail, \%substs);
%headers = ParseMailHeaders($mail);
%headers = CleanMailHeaders(%headers);
$body = FindMailBody($mail);
my $mail_relay = Param("mailrelay");
my $mailer = Mail::Mailer->new("smtp",
Server => $mail_relay);
$mailer->open(\%headers)
or warn "Can't send hook mail: $!\n";
print $mailer "$body\n";
$mailer->close();
}
}
}
}

View File

@@ -1,265 +0,0 @@
#!/usr/bin/perl -w
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
require 'CGI.pl';
use strict;
sub StupidFuncToShutUpWarningsByUsingVarsAgain {
# Never called: references these package globals a second time so that
# perl -w does not emit "used only once" warnings for them.
my $z;
$z = $::TreeOpen;
$z = $::CloseTimeStamp;
}
# Load shared tree state under the lock, then emit the admin page: a
# header, a warning banner, and one form section per admin action.
Lock();
LoadCheckins();
LoadMOTD();
LoadTreeConfig();
Unlock();
my $BIP = BatchIdPart('?');  # NOTE(review): $BIP appears unused here -- verify
my $BIP_nohook = BatchIdPart();
print "Content-type: text/html\n\n";
PutsHeader("Bonsai Administration [`$::TreeID' Tree]",
"Bonsai Administration",
"Administrating `$::TreeID' Tree");
print <<EOF ;
<pre>
</pre>
<center><b>
You realize, of course, that you have to know the magic password to do
anything from here.
</b></center>
<pre>
</pre>
<hr>
EOF
# Each of these prints one self-contained form section.
TweakCheckins();
CloseTree();
TweakTimestamps();
ChangeMOTD();
EditEmailMessage();
RebuildHook();
RebuildHistory();
ChangePasswd();
PutsTrailer();
exit 0;
sub TweakCheckins {
# Links to the bulk checkin-tweaking page and the parameter editor.
print qq(
<a href="showcheckins.cgi?tweak=1$BIP_nohook">
Go tweak bunches of checkins at once.</a><br>
<a href="editparams.cgi">
Edit Bonsai operating parameters.</a>
<hr>
);
}
sub CloseTree { # Actually opens tree also
# Emit either the close-tree or the open-tree form, depending on the
# tree's current state, pre-filled with the current timestamp.
my $timestamp = value_quote(MyFmtClock(time));
print qq(
<FORM method=get action=\"doadmin.cgi\">
<B>Password:</B> <INPUT NAME=password TYPE=password> <BR>
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
);
if ($::TreeOpen) {
print qq(
<INPUT TYPE=HIDDEN NAME=command VALUE=close>
<B>Closing time stamp is:</B>
<INPUT NAME=closetimestamp VALUE=\"$timestamp\"><BR>
<INPUT TYPE=SUBMIT VALUE=\"Close the tree\">
);
} else {
print qq(
<INPUT TYPE=HIDDEN NAME=command VALUE=open>
<B>The new \"good\" timestamp is:</B>
<INPUT NAME=lastgood VALUE=\"$timestamp\"><BR>
<INPUT TYPE=CHECKBOX NAME=doclear CHECKED>Clear the list of checkins.<BR>
<INPUT TYPE=SUBMIT VALUE=\"Open the tree\">
);
}
print qq(</FORM>\n<hr>\n\n);
}
sub TweakTimestamps {
# Form for directly editing the last-good and last-close timestamps.
my $lg_timestamp = value_quote(MyFmtClock($::LastGoodTimeStamp));
my $c_timestamp = value_quote(MyFmtClock($::CloseTimeStamp));
print qq(
<FORM method=get action=\"doadmin.cgi\">
<B>Password:</B> <INPUT NAME=password TYPE=password> <BR>
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
<INPUT TYPE=HIDDEN NAME=command VALUE=tweaktimes>
<TABLE>
<TR>
<TD><B>Last good timestamp:</B></TD>
<TD><INPUT NAME=lastgood VALUE=\"$lg_timestamp\"></TD>
</TR><TR>
<TD><B>Last close timestamp:</B></TD>
<TD><INPUT NAME=lastclose VALUE=\"$c_timestamp\"></TD>
</TR>
</TABLE>
<INPUT TYPE=SUBMIT VALUE=\"Tweak the timestamps\">
</FORM>
<hr>
);
}
sub ChangeMOTD {
# Form to edit the message-of-the-day; the current MOTD is also carried
# along in the hidden "origmotd" field.
my $motd = value_quote($::MOTD);
print qq(
<FORM method=get action=\"doadmin.cgi\">
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
<B>Password:</B> <INPUT NAME=password TYPE=password> <BR>
<INPUT TYPE=HIDDEN NAME=command VALUE=editmotd>
Change the message-of-the-day:<br>
<INPUT TYPE=HIDDEN NAME=origmotd VALUE=\"$motd\">
<TEXTAREA NAME=motd ROWS=10 COLS=50>$::MOTD</TEXTAREA><BR>
<INPUT TYPE=SUBMIT VALUE=\"Change the MOTD\">
</FORM>
<hr>
);
}
sub EditEmailMessage {
# Selector form for which notification template editmessage.cgi edits.
print qq(
<FORM method=get action=\"editmessage.cgi\">
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
Change the e-mail message sent:
<SELECT NAME=msgname SIZE=1>
<OPTION VALUE=openmessage>when a checkin is made when the tree is open.
<OPTION VALUE=closemessage>when a checkin is made when the tree is closed.
<OPTION VALUE=treeopened>to the hook when the tree opens
<OPTION VALUE=treeopenedsamehook>to the hook when the tree opens and the hook isn\'t cleared
<OPTION VALUE=treeclosed>to the hook when the tree closes
</SELECT><br>
<INPUT TYPE=SUBMIT VALUE=\"Edit a message\">
</FORM>
<hr>
);
}
sub RebuildHook {
# Form to repopulate the hook from scratch via repophook.cgi.
my $lg_timestamp = value_quote(MyFmtClock($::LastGoodTimeStamp));
print qq(
<FORM method=get action=\"repophook.cgi\">
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
<B>Password:</B> <INPUT NAME=password TYPE=password> <BR>
<INPUT TYPE=HIDDEN NAME=command VALUE=repophook>
Repopulate the hook from scratch.<p>
<font color=red size=+2>This can be very dangerous.</font>
<br>
You should usually only need to do this to populate a new Bonsai branch.
<p>
<b>Use any checkin since:</b>
<INPUT NAME=startfrom VALUE=\"$lg_timestamp\">
<br>
<INPUT TYPE=SUBMIT VALUE=\"Rebuild the hook\">
</FORM>
<hr>
);
}
sub RebuildHistory {
# Form to rebuild the full CVS checkin history via rebuildcvshistory.cgi.
my $timestamp = value_quote(MyFmtClock(0));
print qq(
<FORM method=get action=\"rebuildcvshistory.cgi\">
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
<B>Password:</B> <INPUT NAME=password TYPE=password> <BR>
<INPUT TYPE=HIDDEN NAME=command VALUE=rebuildcvs>
Recreate the entire list of every checkin ever done to the
$::TreeInfo{$::TreeID}{repository} repository from scratch.
<p>
<font color=red size=+2>This can take an incredibly long time.</font>
<br>
You should usually only need to do this when first introducing an entire CVS repository into Bonsai.
<p>
<b>Ignore checkins earlier than:</b>
<INPUT NAME=startfrom VALUE=\"$timestamp\">
<br>
<b>Ignore files before (must be full path starting
with $::TreeInfo{$::TreeID}{repository}; leave blank to do everything):</b>
<INPUT NAME=firstfile VALUE=\"\" size=50>
<br>
<b>Only do files within the subdirectory of
$::TreeInfo{$::TreeID}{repository} named:</b>
<INPUT NAME=subdir VALUE=\".\" size=50>
<br>
<INPUT TYPE=SUBMIT VALUE=\"Rebuild cvs history\">
</FORM>
<hr>
);
}
sub ChangePasswd {
# Form to change either this tree's admin password or the master one.
# Note: uses method=post so passwords don't end up in server logs.
print qq(
<FORM method=post action=\"doadmin.cgi\">
<INPUT TYPE=HIDDEN NAME=treeid VALUE=$::TreeID>
<INPUT TYPE=HIDDEN NAME=command VALUE=changepassword>
Change password.<BR>
<B>Old password:</B> <INPUT NAME=password TYPE=password> <BR>
<B>New password:</B> <INPUT NAME=newpassword TYPE=password> <BR>
<B>Retype new password:</B> <INPUT NAME=newpassword2 TYPE=password> <BR>
<INPUT TYPE=RADIO NAME=doglobal VALUE=0 CHECKED>Change password for this tree<BR>
<INPUT TYPE=RADIO NAME=doglobal VALUE=1>Change master Bonsai password<BR>
<INPUT TYPE=SUBMIT VALUE=\"Change the password\">
</FORM>
);
}

View File

@@ -1,119 +0,0 @@
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
use strict;
# Shut up misguided -w warnings about "used only once". "use vars" just
# doesn't work for me.
sub adminfuncs_pl_sillyness {
# Never called: references $::TreeID a second time so that perl -w does
# not warn "used only once" for it.
my $zz;
$zz = $::TreeID;
}
require 'globals.pl';
use Mail::Internet;
use Mail::Header;
sub MakeHookList {
# Return the sorted, de-duplicated e-mail addresses of everyone who has
# a checkin currently on the hook (@::CheckInList).
my ($checkin, $person, %people, @addrs);
# First, empty the arrays
undef %people; undef @addrs;
foreach $checkin (@::CheckInList) {
# Each entry names a package hash (e.g. "::checkin_<date>_<pid>");
# the string eval takes a symbolic reference to it despite |use strict|.
my $info = eval("\\\%$checkin");
$people{$$info{'person'}} = 1;
}
foreach $person (sort(keys(%people))) {
push @addrs, EmailFromUsername($person);
}
return @addrs;
}
sub SendHookMail {
# Mail the template in data file |$filename| (after %hooklist% et al.
# substitution) to everyone with a checkin on the hook. Silently does
# nothing when the hook is empty or the template is missing/unreadable.
my ($filename) = @_;
my $hooklist = join(', ', MakeHookList());
my (%substs, %headers, $body, $mail);
local *MAIL;
my $pathname = DataDir() . "/$filename";
return unless $hooklist;    # nobody to mail
return unless -f $pathname; # no template for this event
# Three-arg open so no character of the path can be misparsed as part of
# the open mode (was the two-arg form: open(MAIL, "< $pathname")).
return unless open(MAIL, '<', $pathname);
$mail = join("", <MAIL>);
close (MAIL);
%substs = ();
$substs{'hooklist'} = $hooklist;
$mail = PerformSubsts($mail, \%substs);
%headers = ParseMailHeaders($mail);
%headers = CleanMailHeaders(%headers);
$body = FindMailBody($mail);
my $mail_relay = Param("mailrelay");
my $mailer = Mail::Mailer->new("smtp", Server => $mail_relay);
$mailer->open(\%headers)
or warn "Can't send hook mail: $!\n";
print $mailer "$body\n";
$mailer->close();
}
sub AdminOpenTree {
# Open the tree as of timestamp |$lastgood|. When |$clearp| is true the
# hook is cleared and the "treeopened" template is mailed; otherwise the
# "treeopenedsamehook" template goes out. No-op if already open.
my ($lastgood, $clearp) = @_;
return if $::TreeOpen;
$::LastGoodTimeStamp = $lastgood;
$::TreeOpen = 1;
PickNewBatchID();
if ($clearp) {
SendHookMail('treeopened');
@::CheckInList = ();
} else {
SendHookMail('treeopenedsamehook');
}
Log("Tree opened. \$::LastGoodTimeStamp is " .
MyFmtClock($::LastGoodTimeStamp));
}
sub AdminCloseTree {
# Close the tree at timestamp |$closetime| and mail the "treeclosed"
# template to everyone on the hook. No-op if already closed.
my ($closetime) = @_;
return unless $::TreeOpen;
$::CloseTimeStamp = $closetime;
$::TreeOpen = 0;
SendHookMail('treeclosed');
Log("Tree $::TreeID closed. \$::CloseTimeStamp is " .
MyFmtClock($::CloseTimeStamp));
}

View File

@@ -1,71 +0,0 @@
#!/usr/bin/perl -w
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
require 'globals.pl';
require 'adminfuncs.pl';
use strict;
sub GetDate {
    # Parse the date from the tail of a directive line (everything from
    # the first digit onward, via str2time); fall back to "now" when the
    # line carries no date.
    my ($line) = (@_);
    if ($line =~ /([0-9].*)$/) {
        return str2time($1);
    }
    return time();
}
Lock();
# Input file: one directive per line, "<command> <treeid> [date...]",
# where command is open, opennoclear, or close (case-insensitive).
open(FID, "<$ARGV[0]") || die "Can't open $ARGV[0]";
while (<FID>) {
chomp();
my $line = $_;
if ($line =~ /^([^ ]*)\s+([^ ]*)/) {
my $foobar = $1;
$::TreeID = $2;
$::TreeID = $2; # Duplicate line to avoid stupid perl warning.
undef @::CheckInList;
undef @::CheckInList; # Duplicate line to avoid stupid perl warning.
if ($foobar =~ /^opennoclear$/i) {
LoadCheckins();
AdminOpenTree(GetDate($line), 0);
WriteCheckins();
} elsif ($foobar =~ /^open$/i) {
LoadCheckins();
AdminOpenTree(GetDate($line), 1);
WriteCheckins();
} elsif ($foobar =~ /^close$/i) {
LoadCheckins();
AdminCloseTree(GetDate($line));
WriteCheckins();
}
}
}
Unlock();

View File

@@ -1,45 +0,0 @@
%define _prefix /var/www/cgi-bin/bonsai
# auto generate the version number based on the output of the date
# command.
%define _version %(eval "date '+%Y%m%d'")
Summary: Development monitoring tool
Name: bonsai-local-conf
Version: %{_version}
Release: 1
Copyright: MPL
Group: Development/Tools
Source: tar://bonsai_local_conf.tar.gz
Prefix: %{_prefix}
Buildroot: /var/tmp/%{name}-root
%description
The local configuration files for bonsai. This package customizes
bonsai for local use. The bonsai package is generic; this
package contains all the descriptions of the local system by providing
the data subdirectory files.
%prep
# empty prep
%build
#empty build
%install
rm -rf $RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT/%{_prefix}
cd $RPM_BUILD_ROOT/%{_prefix}
tar zxf %{_sourcedir}/bonsai_local_conf.tar.gz
%clean
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,apache,apache)
%{_prefix}/data/*

Binary file not shown.

Before

Width:  |  Height:  |  Size: 20 KiB

View File

@@ -1,58 +0,0 @@
%define _prefix /var/www/cgi-bin/bonsai
# auto generate the version number based on the output of the date
# command.
%define _version %(eval "date '+%Y%m%d'")
Summary: Web and SQL interface to CVS
Name: bonsai
Version: %{_version}
Release: 1
Copyright: MPL
Group: Development/Tools
Source: cvs://:pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot:mozilla/webtools/bonsai/bonsai.tar.gz
Prereq: apache
Prefix: %{_prefix}
Buildroot: /var/tmp/%{name}-root
%description
%prep
%setup -q -n bonsai
%build
prefix='%{_prefix}' \
./configure
make
%install
rm -rf $RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT/%{_prefix}
make PREFIX=$RPM_BUILD_ROOT/%{_prefix} \
install
# the data directory needs to be group writable so that the cgi's can update
# files in it. No other program needs to use this directory.
chmod 770 $RPM_BUILD_ROOT/%{_prefix}/data
# config files do not belong as part of this package,
# they have their own package
rm -rf $RPM_BUILD_ROOT/%{_prefix}/data/*
%clean
#rm -rf $RPM_BUILD_ROOT
# the data dir must be writable by the cgi process.
%files
%defattr(-,root,root)
%{_prefix}
%defattr(-,apache,apache)
%{_prefix}/data

View File

@@ -1,201 +0,0 @@
So you want to run Bonsai? It's better that you know how Bonsai works,
since you're obviously going to be hacking around in the code to adapt
it to your individual site configuration.
o What Bonsai can do for you:
Bonsai allows you to query the contents of your CVS tree, figuring out
the differences between arbitrary versions and/or branches of a file
and allows you to watch those changes over time.
One of the problems with CVS is that although it allows you to define
logical groups of directories into a module, it has no way to define a
module that represents a specific branch within one or more of those
directories. Bonsai allows you to define a module that represents
both a directory and a branch within that directory in your CVS
repository.
Bonsai is tree control.
---------------------
o How does it work?
To do all this, Bonsai requires access to the source of your CVS
repository. This means that it will actually read the source files in
their ,v format. It is not enough that you have access to a checked
out copy of an arbitrary CVS tree. Bonsai also reads the modules that
you have defined in the modules file in the CVSROOT directory of your
CVS repository. The logical mappings that you set up in that file
define the base Bonsai modules that Bonsai will use to set up your
queries.
In order to keep track of these changes in a format that is easily
queried Bonsai also requires access to a relational database, in this
case MySQL. Once your CVS tree is in place and Bonsai has been
installed, you will import the important data from your CVS repository
into the Bonsai database. This doesn't import the entire repository
verbatim, it only reads and stores the information that it needs
including information about users, dates, file names, versions and
branch information.
To keep track of changes over time, Bonsai also requires notification
through some kind of asynchronous method to know that you have updated
a file. It keeps track of these changes through email. In CVS
every time that you make a check-in, any scripts that are defined in the
loginfo file in the CVSROOT directory of your CVS repository will be
run and the information about that check-in will be passed to that
script. Bonsai requires that you add a script to that file that will
automatically generate a specially formatted email. That email will
then be sent to a special account and, in turn, a script. That script
will then parse the email and update the Bonsai database with the
check-in information.
This method, while seemingly roundabout, provides a few advantages.
It keeps you from constantly polling your CVS tree to check for
changes. This can be a very intensive operation on large
repositories. This method is pretty reliable. Mail messages are
rarely lost on systems.
Bonsai requires that it always have read access to the CVS repository.
It does not ever need to write to the repository so this means you can
use a read-only nfs setup or some other mirroring strategy.
The last part of Bonsai is the web based interface. This interface is
where you do most of the day-to-day administration and querying. The
interface uses the backend database and the configuration files that
you set up.
---------------
o How do I set up my administration password?
When you build bonsai, the program "trapdoor" is installed into the data
directory in your Bonsai installation tree. Change to the data
directory in your installation and run the command:
trapdoor <your admin password> > passwd
If you look at the file you will see your admin password in standard
unix crypt() format.
---------------
o Ok, I've installed the files. What do I do now?
First, you need to define logical Bonsai modules on top of the modules
that you have already defined in CVS. Any CVS modules that you do not
define here will still show up in the Bonsai query interface.
However, defining Bonsai modules allows you access to the most
commonly used modules and allows you fast access to the branches of a
particular module. Also, to import a directory from CVS into Bonsai
it must be included as part of one of the Bonsai modules.
To set up the Bonsai modules you need to edit the configdata script in
the data/ directory of your Bonsai installation. The first part of
this file contains a list of the Bonsai modules that you want to
define and looks something like this:
set treelist {default module_a module_b module_c}
Please note the "default" module. You can define this module to be
any of the modules in your CVS tree. It is probably best that you
define it as your most used. It _must_ be defined.
For each of the Bonsai modules you need to define the information that
describes that module. For example, for your default module you can
define the following information:
set treeinfo(default,module) XYZSource
set treeinfo(default,branch) {}
set treeinfo(default,repository) {/cvsroot}
set treeinfo(default,description) {XYZ Sourcecode}
set treeinfo(default,shortdesc) {XYZ}
Each of the treeinfo settings describes the following things:
module: This is the logical module as defined in your modules file on
the CVS repository.
branch: This is the branch within that module. As above, you don't
have to have one of these defined. If you don't it's the same as the
HEAD branch.
repository: This is the directory that contains the repository.
description: This is the long description for the module, used
throughout the web interface.
shortdesc: This is a shorter version of the description.
Here is another example using a branch:
set treeinfo(module_a,module) ABCSource
set treeinfo(module_a,branch) {ACB_MERGE_1_0_BRANCH}
set treeinfo(module_a,repository) {/cvsroot}
set treeinfo(module_a,description) {ABC Sourcecode}
set treeinfo(module_a,shortdesc) {ABC}
Also in the configdata file you need to define the absolute paths to
some more commonly used commands and configuration information. These
are pretty self explanatory:
set cvscommand /usr/local/bin/cvs
set rlogcommand /usr/bin/rlog
set rcsdiffcommand /usr/bin/rcsdiff
set cocommand /usr/bin/co
set lxr_base http://www.abc.com/webtools/lxr/source
set mozilla_lxr_kludge TRUE
Once you have set up these configuration items you also need to make a
directory in your data directory that has the same name as each of the
modules above. For example, for the default module defined above you
would need to create a directory called "XYZSource".
-----------------
o How do I import data?
You can do this from the administration menu in Bonsai. Go to the
toplevel of Bonsai and choose the module that you want to import by
using the pull down menu. Then click on the link near the bottom of
the page for administration. This will take you to the administration
page for that module. If this is the first time importing data, find
the section that has a button labeled "Rebuild CVS history". When you
fill in your administration password and click on the button, all of
the history information for that Bonsai module will be rebuilt.
You need to do this once for all of the modules that you have defined.
Unfortunately, there is no way to import an entire CVS tree from the
root.
------------------
o How do I set up mail for bonsai?
There are three things that you need to do to set up email for
bonsai.
o You need to set up an account that will accept the email from Bonsai
and process it. When you have set up that user's .forward file to run
the script that handles the email. This is what a sample .forward
file looks like, please note that the script takes one argument which
is the directory where all of your bonsai data resides:
"|/home/httpd/html/webtools/bonsai/handleCheckinMail.pl /home/httpd/html/webtools/bonsai"
o You need to set up an alias for "bonsai-checkin-daemon" to the
account that will process the email. This is where the mail will be
sent to when checking into CVS. Also create an alias called
"bonsai-daemon" for error mail.
o You need to add the script that creates the email to the loginfo
file in CVS. To do this you can add a line to the loginfo file that
looks like this:
# For bonsai
ALL /home/httpd/html/webtools/bonsai/dolog.pl -r /usr/local/cvsroot bonsai-checkin-daemon@your-bonsai-host.your-company.com
#
This will generate a piece of email every time someone checks in code
and should be handled with the setup above.

File diff suppressed because it is too large Load Diff

View File

@@ -1,23 +0,0 @@
From: bonsai-daemon
To: %name%
Subject: [Bonsai] Hey! You checked in while the tree was closed!
Mime-Version: 1.0
Content-Type: text/html
<HTML>
<H1>Boy, you better have had permission!</H1>
You just checked into <tt>%dir%</tt> the files <tt>%files%</tt>. The
tree is currently frozen. You better have had permission from the build group
to make a checkin; otherwise, you're in deep doo-doo.
<P>
Your contact info and other vital data is listed below. Please
<a href=http://warp/bonsai/profile.cgi?person=%name%>update</a>
this info <b>immediately</b> if it is at all inaccurate or incorrect.
<hr>
%profile%

File diff suppressed because it is too large Load Diff

View File

@@ -1,63 +0,0 @@
dnl Process this file with autoconf to produce a configure script.
AC_INIT
dnl Minimum perl version required by the scripts (three-arg open, etc.).
PERL_VERSION=5.006
dnl Checks for programs.
AC_PATH_PROGS(PERL, $PERL perl5 perl)
AC_MSG_CHECKING([for minimum required perl version >= $PERL_VERSION])
_perl_version=`PERL_VERSION=$PERL_VERSION $PERL -e 'print "$]"; if ($] >= $ENV{PERL_VERSION}) { exit(0); } else { exit(1); }' 2>&5`
_perl_res=$?
AC_MSG_RESULT([$_perl_version])
if test "$_perl_res" != 0; then
AC_MSG_ERROR([Perl $PERL_VERSION or higher is required.])
fi
AC_MSG_CHECKING([for full perl installation])
_perl_archlib=`$PERL -e 'use Config; if ( -d $Config{archlib} ) { exit(0); } else { exit(1); }' 2>&5`
_perl_res=$?
if test "$_perl_res" != 0; then
AC_MSG_RESULT([no])
AC_MSG_ERROR([Cannot find Config.pm or \$Config{archlib}. A full perl installation is required.])
else
AC_MSG_RESULT([yes])
fi
dnl RCS/CVS tools; cvsgraph is optional (not in the required check below).
AC_PATH_PROG(CO, co)
AC_PATH_PROG(CVS, cvs)
AC_PATH_PROG(CVSGRAPH, cvsgraph)
AC_PATH_PROG(RLOG, rlog)
AC_PATH_PROG(RCSDIFF, rcsdiff)
if test -z "$CO" -o -z "$CVS" -o -z "$RLOG" -o -z "$RCSDIFF"; then
AC_MSG_ERROR([Missing one or more required binaries.])
fi
dnl Checks for libraries.
dnl Checks for header files.
dnl Checks for typedefs, structures, and compiler characteristics.
dnl Checks for library functions.
AC_CHECK_PERL_MODULES([ \
CGI::Carp \
DBD::mysql \
DBI \
Date::Format \
Date::Parse \
File::Basename \
File::Path \
Mail::Internet \
Mail::Mailer \
Time::Local \
],,_missing_perl_mod=1)
if test -n "$_missing_perl_mod"; then
AC_MSG_ERROR([Missing one or more required Perl modules.])
fi
AC_OUTPUT(Makefile)
AC_OUTPUT_COMMANDS([echo type 'make install' to install bonsai])

View File

@@ -1,27 +0,0 @@
<html> <head>
<title>Changing other people's contact info.</title>
</head>
<body>
<h1>Changing other people's contact info.</h1>
Occasionally, you need to change the "contact info" listed for some
other person. (Like, they just called you on their cellphone from the
horrible traffic accident they just got in, and need you to go on the
hook for them.) Well, it's easy. Go ahead onto their contact page,
change the contact info field, and type your own username and UNIX
password to the form. It'll work.
<P>
Note that you're only allowed to change the "Current Contact Info"
field this way. It won't let you change anything else.
<hr>
<a href="toplevel.cgi" target=_top>Back to the top of Bonsai</a>
<hr>
<address><a href="http://home.netscape.com/people/terry/">Terry Weissman &lt;terry@netscape.com&gt;</a></address>
<!-- hhmts start -->
Last modified: Wed Oct 30 13:03:35 1996
<!-- hhmts end -->
</body> </html>

View File

@@ -1,165 +0,0 @@
#!/usr/bin/perl -w
#
# Version: MPL 1.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is parsecheckins.pl, a Bonsai-output -> HTML parser.
#
# The Initial Developer of the Original Code is
# J. Paul Reed (preed@sigkill.com).
#
# Portions created by the Initial Developer are Copyright (C) 2003
# the Initial Developer. All Rights Reserved.
#
# Contributor(s): Dave Miller <justdave@netscape.com>
#
use strict;

## Suggested usage:
##
## cat saved_bonsai_query_output | perl parsecheckins.pl > output 2> logfile &
## tail -f logfile
##
## Script progress gets dumped on stderr

## hostname of your Bugzilla installation
my $bzHost = 'your.bugzilla.host';

## Set to 0 if you don't want to query your bugzilla host for the titles of
## the bugs associated with these checkins
my $getTitles = 1;

## Set to the lowest valid bug number in your bz install; for non-bmo
## installs, should probably be 0
my $lowBugnum = 1000;

## SCRIPT STARTS HERE ##

## Slurp the saved Bonsai query output.
## (Initialized to '' so the first .= doesn't warn under -w.)
my $input = '';
while (<>) {
    $input .= $_;
}

## remove the header
$input =~ s/.*?<TH WIDTH=\d+\%>Description\n.*?\n//is;
## remove the footer
$input =~ s/<\/TABLE><br>.*//is;
## remove lines that contain nothing but starting a new table
$input =~ s/<\/TABLE><TABLE.*?\n//igs;
## remap the linefeeds so they only occur between <tr> blocks
$input =~ s/\n/ /gs;
$input =~ s/<\/tr>\s*<tr>/<\/tr>\n<tr>/igs;

## ok, at this point, each <tr> is on a line by itself.
## let's load this into an array of lines.
my @lines = split(/\n/,$input);

## strip all lines that don't contain a description
@lines = grep { $_ =~ /<TD WIDTH=4\d% VALIGN=TOP/ } @lines;

## strip all lines that are checkins that don't reference a bugzilla bug
## comment this line out if you want bugs that don't reference a bz bug
@lines = grep { $_ =~ /show_bug.cgi/ } @lines;

## now we do a bunch of transformations on each line:
@lines = map {
    ## remove all rowspan markers (simplifies some of the parsing below)
    s/rowspan=[^ >]+//ig;
    ## Strip off the leading and ending <tr>'s
    s/^<tr>//i;
    s/<\/tr>$//i;
    ## Strip out extra <br>s
    s/<br>/ /ig;
    ## remove the date column because we don't need it
    s/<TD width=2\%.*?<td/<td/i;
    ## remove the email address link; if you want to keep the email address,
    ## comment out the 2nd regex and use the first (commented out) one
    # s/<a[^>]+>(.*?)<\/a>/$1/i;
    s/<td width=\d+\%><a[^>]+>(.*?)<\/a>\s*<td/<td/i;
    ## remove the filename column because we don't need it
    s/<td width=45\%.*?<td/<td/i;
    ## remove the version number, branch tag, and linecount columns
    s/<td width=2\%>(?:<font|\&nbsp).*?<td/<td/i;
    s/<td width=2\%><tt>.*?<td/<td/i;
    s/<td width=2\%>(?:<font|\&nbsp).*?<td/<td/i;
    ## Strip the last <td> preceding the commit message
    s/<td width=\d+%\s+VALIGN=TOP\s*>//i;
    ## Strip off parts of the comment we don't care about
    s/Patch by.+//;
    s/Fix by.+//;
    s/r=.+//;
    s/a=.+//;
    s/r,a=.+//;
    s/r\/a.+//;
    ## Get the titles for bugs
    if (/show_bug\.cgi\?id=(\d+)/) {
        my $bugnum = $1;
        if ($getTitles && $bugnum > $lowBugnum) {
            ## We use lynx so libwww doesn't have to get installed;
            ## yay for quick and dirty!
            my $bugreport = `lynx -source http://$bzHost/show_bug.cgi?id=$bugnum`;
            if ($bugreport =~ /<TITLE>Bug \d+ - ([^<]+)<\/TITLE>/i) {
                my $title = $1;
                print STDERR "$bugnum: $title\n";
                s/(HREF="[^"]+")/$1 title="$title"/;
            }
            else {
                print STDERR "BUG REPORT LACKS TITLE (not authorized to view?): "
                           . "$bugnum\n";
            }
        }
    }
    else {
        print STDERR "NO BUG LINK FOUND: $_\n";
    }
    ## Clean up extra spaces
    s/^\s+//;
    s/\s+$//;
    ## Standardize the separator format:
    s/(\d+<\/A>)\s*.\s+/$1 \- /;
    ## return the final result; should be the comment message, titles and all.
    $_;
} @lines;

## Uncomment this if you want the bugs listed oldest first
#@lines = reverse @lines;

## Play with the output format here; now, it gives an unenumerated html list
print "<ul>\n";
foreach my $line (@lines) {
    print "<li>$line</li>\n";
}
print "</ul>\n";

View File

@@ -1,92 +0,0 @@
#!/usr/bin/perl -w
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
use strict;
require 'CGI.pl';
use vars qw($CloseTimeStamp);
print "Content-type: text/html\n\n";
LoadCheckins();
my $maxsize = 400;
PutsHeader("Beancounter central", "Meaningless checkin statistics");
print "
<TABLE BORDER CELLSPACING=2><TR>
<TH>Tree closed</TH>
<TH>Number<BR>of<BR>people<BR>making<BR>changes</TH>
<TH COLSPAN=2>Number of checkins</TH>
</TR>\n";
my @list = ();
my $globstr = DataDir() . '/batch-*[0-9].pl';
foreach my $i (glob($globstr )) {
if ($i =~ /(\d+)/) {
push @list, $1;
}
}
@list = sort { $b <=> $a } @list;
my $first = 1;
my $biggest = 1;
my %minfo; # meaninglesss info
foreach my $i (@list) {
my $batch = DataDir() . "/batch-$i.pl";
require $batch;
$minfo{$i}{num} = scalar @::CheckInList;
$biggest = $minfo{$i}{num} if ($minfo{$i}{num} > $biggest);
if ($first) {
$minfo{$i}{donetime} = "Current hook";
$first = 0;
} else {
$minfo{$i}{donetime} = MyFmtClock($::CloseTimeStamp);
}
my %people = ();
foreach my $checkin (@::CheckInList) {
my $info = eval("\\\%$checkin");
$people{$$info{'person'}} = 1;
}
$minfo{$i}{numpeople} = scalar keys(%people);
}
foreach my $i (@list) {
print "<tr>\n";
print "<TD>$minfo{$i}{donetime}</TD>\n";
print "<TD ALIGN=RIGHT>$minfo{$i}{numpeople}</TD>\n";
print "<TD ALIGN=RIGHT>$minfo{$i}{num}</TD>\n";
printf "<TD><table WIDTH=%d bgcolor=green>\n",
($minfo{$i}{num} * $maxsize) / $biggest;
print "<tr><td>&nbsp;</td></tr></table></TD>\n";
print "</TR>\n";
}
print "</table>\n";
PutsTrailer();
exit;

View File

@@ -1,133 +0,0 @@
#!/usr/bin/perl
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
require 'globals.pl';
sub add_module {
    # Record one entry from `cvs checkout -c` output in %::Modules.
    # The module name (and an optional single "-flag" token following it)
    # is stripped from the front; the remainder is the member list.
    my ($entry) = @_;
    $entry =~ s/^\s*(\S+)\s*(-\S*\s*)?//;
    my $module_name = $1;
    $::Modules{$module_name} = $entry;
}
sub init_modules {
    # Rebuild %::Modules from the repository's module definitions by running
    # `cvs checkout -c` (via $::CVSCOMMAND) and feeding each complete entry
    # to add_module().  Continuation lines are folded before parsing.
    my ($cvsroot, $curline);
    my $cvscommand = Param('cvscommand');
    undef %::Modules;
    $cvsroot = $::TreeInfo{$::TreeID}{'repository'};
    $::CVSCOMMAND = "$cvscommand -d $cvsroot checkout -c";
    # NOTE(review): 2-arg pipe open with interpolated config values — safe
    # only while Param('cvscommand') and the repository path are trusted.
    open(MODULES, "$::CVSCOMMAND |") ||
        die "Unable to read modules list from CVS\n";
    $curline = "";
    while (<MODULES>) {
        chop;
        if (/^\s+/) {
            # Replace any leading whitespace with a single space before
            # appending to curline. This is necessary for long All lines
            # which get split over reads from the CVSCOMMAND.
            # The match of oldlist and newlist in find_dirs will fail
            # if this is not done.
            s/^\s+/ /;
            $curline .= $_;
        } else {
            # A non-indented line starts a new entry; flush the previous one.
            add_module($curline) if ($curline);
            $curline = $_;
        }
    }
    # Flush the final accumulated entry.
    add_module($curline) if ($curline);
    close(MODULES);
}
sub init {
$::TreeID = $ARGV[0];
die "Must specify a treeid...\n"
unless ($::TreeID);
LoadTreeConfig();
$::ModuleName = $::TreeInfo{$::TreeID}{'module'};
init_modules();
die "modules file no longer includes `$::ModuleName' ???
Used `$::CVSCOMMAND' to try to find it\n"
unless (exists($::Modules{$::ModuleName}));
$::DataDir = DataDir();
}
sub find_dirs {
    # Expand $::ModuleName through the %::Modules definitions until a fixed
    # point is reached; returns the space-separated list of leaf entries.
    my $previous = '';
    my $expanded = $::ModuleName;
    while ($expanded ne $previous) {
        $previous = $expanded;
        my @next;
        for my $entry (split(/\s+/, $previous)) {
            if (exists($::Modules{$entry})) {
                push @next, $::Modules{$entry};
                # Undef (not delete) so a later reference cannot recurse
                # forever; the key still exists() afterwards, matching the
                # original's cycle-breaking behaviour.
                undef($::Modules{$entry});
            } else {
                push @next, $entry;
            }
        }
        $expanded = join(' ', @next);
    }
    return $expanded;
}
sub create_legal_dirs {
    # Write the expanded directory list to $::DataDir/legaldirs — one line
    # per entry plus a "<entry>/*" wildcard line — while holding the hook
    # lock via Lock()/Unlock().
    # NOTE(review): $list and $i below are package globals (this file does
    # not "use strict"); the declared $dirs is never used.
    my ($dirs);
    $list = find_dirs();
    Lock();
    unless (open(LDIR, "> $::DataDir/legaldirs")) {
        Unlock();
        die "Couldn't create $::DataDir/legaldirs";
    }
    # 0666: world-writable — presumably so other hook users can rewrite it.
    chmod(0666,"$::DataDir/legaldirs");
    foreach $i (split(/\s+/, $list)) {
        print LDIR "$i\n";
        print LDIR "$i/*\n";
    }
    close(LDIR);
    Unlock();
}
##
## Main program...
##
Log("Attempting to recreate legaldirs...");
init();
create_legal_dirs();
Log("...legaldirs recreated.");
exit(0);

View File

@@ -1,130 +0,0 @@
#!/usr/local/bin/perl -w
#use diagnostics;
use strict;
$::accessconfig = [
#{
# 'cvsroot' => 'neutron:/var/cvs',
# 'branch' => '#-all-#',
# 'location' => {
# 'file' => [ 'test/foo.sh' ],
# },
# 'close' => 'Hey! You Suck!',
# 'bless' => {
# 'user' => [ 'thj' ],
# },
# 'permit' => {
# 'unix_group' => [ 'ebuildrel' ],
# },
# 'deny_msg' => 'buildrel group has been naughty',
#},
#{
# 'cvsroot' => 'neutron:/var/cvs',
# 'branch' => 'TRUNK',
# 'location' => {
# 'file' => [ 'test/foo.sh' ],
# },
# 'close' => 'BRANCH closed, so go away.',
# 'bless' => {
# 'bonsai_group' => [ 'test-2' ],
# },
# 'permit' => {
# 'bonsai_group' => [ 'test-2' ],
# },
# 'deny_msg' => 'i don\'t like people',
#},
#{
# 'cvsroot' => 'neutron:/var/cvs',
# 'branch' => '#-all-#',
# 'location' => {
# 'module' => [ ],
# 'directory' => [ 'test/foo/', 'blah/blah/blah' ],
### 'file' => [ 'test/foo.sh' ],
# },
# 'close' => 0,
## 'permit' => {
### 'unix_group' => [ ],
### 'bonsai_group' => [ ],
### 'user' => [ ],
## },
## 'deny' => {
## 'unix_group' => [ ],
## 'bonsai_group' => [ ],
## 'user' => [ ],
## },
## 'bless' => {
## 'unix_group' => [ ],
## 'bonsai_group' => [ 'blessed' ],
## 'user' => [ ],
## },
#},
#{
# 'cvsroot' => 'neutron:/var/cvs',
# 'branch' => 'TRUNK',
# 'location' => {
# 'file' => [ 'test/foo.sh' ],
# },
# 'close' => 'Blah, blah, blah.',
# 'bless' => {
# 'user' => [ 'thj' ],
# },
# 'permit' => {
# 'user' => [ 'thj' ],
# },
#},
##{
## 'cvsroot' => '#-all-#',
## 'branch' => '#-all-#',
## 'location' => {
## 'directory' => [ 'CVSROOT' ],
## },
### 'close' => "TESTING TESTING TESTING",
## 'permit' => {
## 'unix_group' => [ 'buildrel' ],
## },
##},
##{
## 'cvsroot' => 'neutron:/var/cvs',
## 'branch' => 'XML_Dev_BRANCH',
## 'location' => {
## 'module' => [ ],
## 'directory' => [ ],
## 'file' => [ 'test/foo.sh' ],
## },
## 'close' => 0,
## 'permit' => {
## 'unix_group' => [ ],
## 'bonsai_group' => [ ],
## 'user' => [ ],
## },
## 'deny' => {
## 'unix_group' => [ ],
## 'bonsai_group' => [ ],
## 'user' => [ ],
## },
## 'bless' => {
## 'unix_group' => [ ],
## 'bonsai_group' => [ ],
## 'user' => [ ],
## },
##},
##{
## 'cvsroot' => 'neutron:/var/cvs2',
## 'branch' => 'FOO',
## 'location' => {
## 'module' => [ 'Vermouth' ],
## },
## 'close' => 0,
##},
{
'cvsroot' => 'neutron:/var/cvs',
'branch' => 'T2',
'location' => {
# 'module' => [ 'Vermouth' ],
# 'file' => [ 'bmsrc/apps/ams/foo.pl' ],
'directory' => [ 'mirror-test' ]
},
# 'permit' => { },
# 'close' => "closed",
},
];
return 1;

View File

@@ -1,214 +0,0 @@
#!/usr/local/bin/perl -w
use strict;
use Sys::Hostname;
use Cwd;
#use diagnostics ;
#use Data::Dumper;
#use Time::HiRes;
#
# It's tempting to use environment variables for things like USER
# and CVSROOT; however, don't. Using the builtin CVS variables is
# a better idea, especially if you are using a three entry
# $CVSROOT/CVSROOT/passwd (i.e., cvs runs as a local user instead of
# the actual user)
#
$::cvsrootdir = shift @ARGV;
#
# I'd really rather have all my "use" and "require" statements before
# anything else, but since I want to keep the bonsai-global.pm module
# checked into $CVSROOT/CVSROOT, I need to do the ugly "parse" the
# root first, then require the module foo you see here.
#
require "$::cvsrootdir/CVSROOT/bonsai-global.pm";
require "$::cvsrootdir/CVSROOT/bonsai-config.pm";
#$::start = Time::HiRes::time;
$::cwd = cwd;
$::user = shift @ARGV;
$::time = time;
$::directory = shift @ARGV ;
$::directory =~ s/^$::cvsrootdir\/?(.*)$/$1/;
$::cvsroot = hostname() . ":" . $::cvsrootdir;
#print "#" x 80, "\n", Dumper(\@ARGV), "#" x 80, "\n";
###
### directory/file specific actions/checks
###
if ($::directory eq "CVSROOT") {
$::modulesfile = "./modules";
if (-e $::modulesfile ) {
$::modules = `cat $::modulesfile`;
$::modules_hash = &BuildModuleHash($::modules) ;
&CheckCircularity($::modules_hash);
print "\nno circular references found in CVSROOT/modules\n";
} else {
print "\nno changes to CVSROOT/modules\n";
}
if (-e "./bonsai-mirrorconfig.pm") {
require "./bonsai-mirrorconfig.pm";
print "CVSROOT/bonsai-mirrorconfig.pm has changed and appears to be OK\n";
} else {
print "no changes to CVSROOT/bonsai-mirrorconfig.pm\n";
}
if (-e "./bonsai-accessconfig.pm") {
require "./bonsai-accessconfig.pm";
print "CVSROOT/bonsai-accessconfig.pm has changed and appears to be OK\n";
} else {
print "no changes to CVSROOT/bonsai-accessconfig.pm\n";
}
print "\n";
}
###
### Log checkin to database
###
open (ENTRIES, "<CVS/Entries") || die "Can't open CVS/Entries" ;
while (<ENTRIES>) {
chomp;
my @line = split /\//;
next if &get('code', @line);
my $branch = &get('tag', @line);
my $oldrev = &get('rev', @line);
my $file = &get('file', @line);
if (&intersect([$file], \@ARGV)) {
# for my $f (@ARGV) { # Sometimes I really hate CVS
# if ($file eq $f) {
$::files .= $branch.":".$oldrev.":".$file." | ";
push @{$::change_ref->{$branch}}, $file;
# }
}
}
close ENTRIES;
$::files =~ s/^(.*) \| $/$1/;
#print "\$files -- $::files\n";
&connect();
#print Time::HiRes::time - $::start, "\n";
my $ac = eval &retrieve("expanded_accessconfig");
&log_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'checking permissions', $::files);
#print Dumper($::change_ref);
for my $i (0..$#{$ac}) {
if (&rule_applies($ac->[$i], $::change_ref)) {
if ( $ac->[$i]->{'close'} && !&included($::user, $ac->[$i]->{'bless'}) ) { push @::closed, $i }
if ( &included($::user, $ac->[$i]->{'deny'}) ) { push @::deny, $i }
if ( $ac->[$i]->{'permit'} && !&included($::user, $ac->[$i]->{'permit'}) ) { push @::deny, $i }
}
}
@::eol = @{&branch_eol($::cvsroot, keys(%$::change_ref))};
if (scalar @::eol) {
my $branch = join ", ", @::eol;
$::msg->{'denied'}->{'eol'} =~ s/#-branch-#/$branch/;
&print_deny_header('eol');
map { print "branch: $_\nfiles:\n"; map { print " $::directory/$_\n" } @{$::change_ref->{$_}} } @::eol;
print &center("", "#"), "\n";
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'branch eol');
exit 1;
}
if (scalar @::closed) {
my $branch = join ", ", &uniq(map{ $ac->[$_]->{'branch'} } @::closed);
$::msg->{'denied'}->{'closed'} =~ s/#-branch-#/$branch/;
&print_deny_header('closed');
&print_blocking_rules('close', @::closed);
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'branch closed');
exit 1;
}
if (scalar @::deny) {
&print_deny_header('access');
&print_blocking_rules('deny_msg', @::deny);
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'permission denied');
exit 1;
}
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'checkin permitted');
&disconnect();
#print Time::HiRes::time - $::start, "\n\n";
###############
# subroutines #
###############
sub print_blocking_rules {
my ($key, @array) = @_;
my $rules = join ", ", @array;
$rules =~ s/^([0-9, ]*[0-9]+), ([0-9]+)$/$1 and $2/;
print "access denied by rule", $#array?"s":"" , " $rules.\n\n";
map { print "$_. ", $ac->[$_]->{$key}?$ac->[$_]->{$key}:'<no reason given>', "\n" } @array;
print &center("", "#"), "\n";
}
sub print_deny_header {
my ($x) = @_;
print &center("", "#"), "\n";
print &center($::msg->{'denied'}->{'generic'}), "\n";
print &center("", "="), "\n";
print &center($::msg->{'denied'}->{$x}), "\n";
print &center("", "-"), "\n";
}
sub center {
    # Pad $text on both sides with $pad_chr to roughly $width columns.
    # The half-width is fractional for odd leftovers; "x" truncates it,
    # so the result may come out one column short of $width.
    my ($text, $pad_chr, $width) = @_;
    $pad_chr ||= ' ';
    $width   ||= 50;
    my $half = ($width - length($text)) / 2;
    return ($pad_chr x $half) . $text . ($pad_chr x $half);
}
sub line {
    # Return a horizontal rule of $width copies of $chr
    # (defaults: '-' repeated 50 times).
    my ($chr, $width) = @_;
    $chr   ||= '-';
    $width ||= 50;
    return $chr x $width;
}
sub included {
    # True when $user matches the permission hash $perm through a bonsai
    # group, a unix group, or an explicit user entry; "#-all-#" entries act
    # as wildcards on both sides.
    my ($user, $perm) = @_;
    my $bonsai = &bonsai_groups($user);
    my $unix   = &unix_groups($user);
    return 1 if &intersect($bonsai, $perm->{'bonsai_group'});
    return 1 if &intersect($unix,   $perm->{'unix_group'});
    return 1 if &intersect([$user, "#-all-#"], $perm->{'user'});
    return 0;
}
sub intersect {
    # Intersection of N list references, returned as a LIST.  An element is
    # kept when its total occurrence count across all lists exceeds the
    # number of lists minus one (duplicates inside one list therefore count
    # toward the threshold, as in the original).
    my @lists = @_;
    my %count;
    for my $aref (@lists) {
        $count{$_}++ for @$aref;
    }
    return grep { $count{$_} > $#lists } keys %count;
}
sub rule_applies {
    # Decide whether one access-control rule ($ah) applies to this commit.
    # $ch_ref maps branch name -> array ref of changed files on that branch.
    # Returns the count of changed files matched by the rule's cvsroot,
    # branch, and location filters (0 => rule does not apply).
    my ($ah, $ch_ref) = @_;
    my $return = 0;
    while (my ($b, $fa) = each (%$ch_ref)) {
        # "#-all-#" is the wildcard value for both cvsroot and branch.
        if (($::cvsroot eq $ah->{'cvsroot'} || $ah->{'cvsroot'} eq "#-all-#") &&
            ($b eq $ah->{'branch'} || $ah->{'branch'} eq "#-all-#")) {
            for my $f (@$fa) { # I would have like to have returned out of this
                $return += &allowed($f, $ah->{'location'}); # loop at the first &allowed file, but when i did
            } # the next call to the sub had ch_ref messed up and the each failed.
            # (Early return would abandon the each() iterator mid-walk; the
            # iterator state persists per-hash, breaking the next call.)
        }
    }
    return $return;
}

View File

@@ -1,37 +0,0 @@
#!/usr/local/bin/perl -w
#use diagnostics;
use strict;

# Site configuration for the bonsai commit-hook scripts.

# Debugging: master switch plus verbosity threshold used by debug_msg().
$::debug = 0;
$::debug_level = 5;

# Defaults consumed by the db helpers (id(), store(), retrieve(), ...).
$::default_db = "development";
$::default_column = "value";
$::default_key = "id";

# Connection parameters per database profile.
%::db = (
    "production" => {
        "dsn" => "dbi:mysql:database=bonsai;host=bonsai2",
        "user" => "rw_bonsai",
        "pass" => "password",
    },
    "development" => {
        "dsn" => "dbi:mysql:database=bonsai_dev;host=bonsai2",
        "user" => "rw_bonsai_dev",
        "pass" => "password",
    },
);

# User-facing denial messages; "#-branch-#" is substituted at runtime.
$::msg = {
    "denied" => {
        "generic" => "CHECKIN ACCESS DENIED",
        "eol" => "BRANCH (#-branch-#) IS NO LONGER ACTIVE",
        "closed" => "BRANCH (#-branch-#) IS TEMPORARILY CLOSED",
        "access" => "INSUFFICIENT PERMISSION TO COMMIT",
    },
);
# NOTE(review): the line above must remain ");" -> see original — kept below.

# Patterns identifying log messages of checkins mirrored from another
# repository (so they are not re-mirrored).
# NOTE(review): "reqex" is a typo for "regex", but the variable name is
# read by other hook scripts — renaming here would break them.
@::mirrored_checkin_reqex = (
    "^mirrored checkin from \\S+: ",
    "^mirrored add from \\S+: ",
    "^mirrored remove from \\S+: ",
    " \\(mirrored checkin from \\S+\\)\$",
    " \\(mirrored add from \\S+\\)\$",
    " \\(mirrored remove from \\S+\\)\$",
);
return 1;

View File

@@ -1,761 +0,0 @@
#!/usr/local/bin/perl -w
#use diagnostics;
use DBI;
use strict;
use Time::HiRes qw(time);
use Text::Soundex;
#use Data::Dumper;
sub checkin {
my ($cwd, $user, $time, $dir, $cvsroot, $log, $change, $hashref) = @_;
my ($db, $ci_id, %ch_id, $branch, $file, $f_ref, $c_ref);
unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
unless ($::last_insert_id{$db}) { $::last_insert_id{$db} = $::dbh{$db}->prepare("select LAST_INSERT_ID()") }
$::dbh{$db}->do("insert into checkin set user_id=" . &id('user', $user)
. ", directory_id=" . &id('directory', $dir)
. ", log_id=" . &id('log', $log)
. ", cvsroot_id=" . &id('cvsroot', $cvsroot)
. ", time=$time");
$ci_id = $::dbh{$db}->selectrow_array($::last_insert_id{$db});
###
### The Following should be put into a subroutine
###
my $insert_change = $::dbh{$db}->prepare("insert into `change` (checkin_id, file_id, oldrev, newrev, branch_id) values (?, ?, ?, ?, ?)");
# my $insert_change_map = $::dbh{$db}->prepare("insert into checkin_change_map (checkin_id, change_id) values (?, ?)");
while (($branch, $f_ref) = each %$change) {
while (($file, $c_ref) = each %$f_ref) {
$insert_change->execute($ci_id, &id('file', $file), ${$c_ref}{'old'}, ${$c_ref}{'new'}, &id('branch', $branch));
$ch_id{$file} = $::dbh{$db}->selectrow_array($::last_insert_id{$db});
# $insert_change_map->execute($ci_id, $::dbh{$db}->selectrow_array($::last_insert_id{$db}));
}
}
###
### End "needs to be in a subroutine"
###
return ($ci_id, \%ch_id);
}
sub insert_mirror_object {
# print "inserting mirror_object...\n";
my ($db, $mirror_id, $bro, $hashref);
$db = $::default_db;
unless ($::last_insert_id{$db}) { $::last_insert_id{$db} = $::dbh{$db}->prepare("select LAST_INSERT_ID()") }
my $insert_mirror = $::dbh{$db}->prepare("insert into mirror (checkin_id, branch_id, cvsroot_id, offset_id, status_id) values (?, ?, ?, ?, ?)");
my $insert_mirror_map = $::dbh{$db}->prepare("insert into mirror_change_map (mirror_id, change_id, type_id, status_id) values (?, ?, ?, ?)");
while (($bro, $hashref) = each %::mirror_object) {
my ($branch, $cvsroot, $offset) = split /@/, $bro;
$offset = '' unless $offset;
$insert_mirror->execute($::id, &id('branch', $branch), &id('cvsroot', $cvsroot), &id('offset', $offset), &id('status', 'building_mirror'));
my $mirror_id = $::dbh{$db}->selectrow_array($::last_insert_id{$db});
# print "mirror_id: $mirror_id\nbranch: $branch\ncvsroot: $cvsroot\noffset: $offset\n";
while (my($ch_id, $type) = each %$hashref) {
$insert_mirror_map->execute($mirror_id, $ch_id, &id('type', $type), &id('status', &nomirrored($::logtext)?'nomirror':'pending'));
$::dbh{$db}->do("UPDATE `mirror` SET status_id = ? WHERE id = ?", undef, &id('status', 'pending'), $mirror_id);
# print "\t-- $ch_id --> $type\n";
}
}
}
sub pop_rev {
# used to migrate to a different schema
my $db = $::default_db ;
my $new_total = 0;
my $old_total = 0;
my $pop_newrev = $::dbh{$db}->prepare("update `change` set newrev = ? where newrev_id = ?");
my $pop_oldrev = $::dbh{$db}->prepare("update `change` set oldrev = ? where oldrev_id = ?");
my $revisions = $::dbh{$db}->selectall_arrayref("select id, value from revision");
for my $row (@$revisions) {
my ($id, $value) = @$row;
print "$id->$value: ";
my $new = $pop_newrev->execute($value, $id);
my $old = $pop_oldrev->execute($value, $id);
$new_total += $new;
$old_total += $old;
print "new($new) -- old($old)\n";
}
print "\nnew_total = $new_total\nold_total = $old_total\n";
}
sub pop_chid {
# used to migrate to a different schema
my $db = $::default_db ;
my $new_total = 0;
my $old_total = 0;
my $pop_chid = $::dbh{$db}->prepare("update `change` set checkin_id = ? where id = ?");
my $map = $::dbh{$db}->selectall_arrayref("select checkin_id, change_id from checkin_change_map");
for my $row (@$map) {
my ($check, $change) = @$row;
print "$check->$change: ";
my $new = $pop_chid->execute($check, $change);
$new_total += $new;
print "changed($new)\n";
}
print "\nnew_total = $new_total\nold_total = $old_total\n";
}
sub bonsai_groups {
my ($user) = @_;
unless ($::groups{'bonsai'}) {
my $db = $::default_db ;
my $groups = $::dbh{$db}->selectcol_arrayref(
"SELECT g.value FROM `group_user_map` m, `group` g, `user` u " .
"WHERE m.user_id = u.id AND m.group_id = g.id AND u.value = ?",
undef, $user);
$::groups{'bonsai'} = [ uniq(@$groups), "#-all-#" ];
}
return $::groups{'bonsai'};
}
sub unix_groups {
    # Return (caching in $::groups{'unix'}) an array ref of the unix groups
    # $user belongs to, plus the "#-all-#" wildcard entry.
    my ($user) = @_;
    unless ($::groups{'unix'}) {
        my @groups;
        my $gid = (getpwnam($user))[3];
        # BUG FIX: the original wrote
        #     my $grp = scalar getgrgid($gid) if $gid;
        # "my" combined with a statement modifier is explicitly undefined
        # behaviour in Perl — declare and assign separately.
        my $grp;
        $grp = scalar getgrgid($gid) if $gid;
        # NOTE(review): this early return yields an empty LIST (undef in
        # scalar context), not an array ref, and skips the cache; callers
        # such as included() dereference the result — confirm intent.
        return @groups unless $grp;
        push @groups, $grp;
        # Walk the whole group database for supplementary memberships.
        while (my ($name, $passwd, $member_gid, $members) = getgrent) {
            for my $m (split /\s/, $members) {
                if ($m eq $user) {
                    # NOTE(review): "next" only advances to the next member
                    # of the SAME group; "last" was likely intended, but
                    # uniq() below makes the outcome identical.
                    push @groups, $name;
                    next;
                }
            }
        }
        endgrent;
        $::groups{'unix'} = [ &uniq(@groups), "#-all-#" ];
    }
    return $::groups{'unix'};
}
sub branch_eol {
    # Return an array ref of those branches in @ba that are marked
    # end-of-life in the cvsroot_branch_map_eol table.
    my ($r, @ba) = @_;
    # BUG FIX: the original had a dangling "my $where =" chained onto the
    # $db assignment (an editing leftover); $where was never used — removed.
    my $db = $::default_db;
    # NOTE(review): $r (the cvsroot) is accepted but never used in the WHERE
    # clause, so EOL branches match across ALL cvsroots — confirm whether an
    # "r.value = ?" filter was intended.
    return $::dbh{$db}->selectcol_arrayref(
        "SELECT b.value FROM `cvsroot_branch_map_eol` m, `cvsroot` r, `branch` b " .
        "WHERE m.cvsroot_id = r.id AND m.branch_id = b.id AND " .
        "(b.value = ?".(" OR b.value = ?" x $#ba).")",
        undef, @ba);
}
sub db_map {
my ($table, @value) = @_;
my ($set, $where);
my $db = $::default_db ;
$set = &db_map_clause($table, \@value, " , ");
$where = &db_map_clause($table, \@value, " AND ");
$::dbh{$db}->do("insert into `$table` set $set") unless &db_mapped($table, $where);
}
sub db_demap {
my ($table, @value) = @_;
my $where = &db_map_clause($table, \@value, " AND ");
my $db = $::default_db ;
$::dbh{$db}->do("delete from `$table` where $where");
}
sub db_mapped {
my ($table, $where) = @_;
my $db = $::default_db;
my $mapped = $::dbh{$db}->selectrow_array("select count(*) from `$table` where $where");
return $mapped;
}
sub db_map_clause {
my ($table, $value, $joiner) = @_;
my $string;
$joiner = " " unless $joiner;
my ($names, $rest) = split /_map/, $table;
my (@name) = split /_/, $names;
for my $i (0..$#name) {
$string .= "`".$name[$i]."_id` = ".&id($name[$i], $$value[$i]).$joiner;
}
$string =~ s/$joiner$/ /;
#print "--> $string\n";
return $string;
}
sub log_commit {
my ($cwd, $user, $time, $dir, $cvsroot, $status, $files, $hashref) = @_;
my $db;
unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
#&debug_msg("\n###\n### ".$::dbh{$db}->quote($status)."\n###\n", 9);
$::dbh{$db}->do("insert into temp_commitinfo set user_id=" . &id('user', $user)
. ", directory_id=" . &id('directory', $dir)
. ", cvsroot_id=" . &id('cvsroot', $cvsroot)
. ", files=" . $::dbh{$db}->quote($files)
. ", cwd=" . $::dbh{$db}->quote($cwd)
. ", status=" . $::dbh{$db}->quote($status)
. ", time=$time");
}
sub collapse_HOH {
    # Drop every top-level entry of the hash-of-hashes whose inner hash has
    # an undefined value for any of the required subkeys in $required.
    # (Deleting the key currently returned by each() is documented-safe.)
    my ($HOHref, $required) = @_;
    while (my ($outer, $inner) = each %$HOHref) {
        my $complete = 1;
        for my $need (@$required) {
            $complete = 0 unless defined $inner->{$need};
        }
        delete $HOHref->{$outer} unless $complete;
    }
}
sub collapse_HOHOH {
    # Apply collapse_HOH to every second-level hash of a three-deep
    # hash-of-hash-of-hashes, pruning incomplete entries in place.
    my ($HOHOHref, $required) = @_;
    for my $inner (values %$HOHOHref) {
        &collapse_HOH($inner, $required);
    }
}
sub update_commit {
my ($cwd, $user, $time, $dir, $cvsroot, $status, $hashref) = @_;
my $db;
unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
unless ($::update_temp_commitinfo{$db}) {
$::update_temp_commitinfo{$db} = $::dbh{$db}->prepare("update temp_commitinfo set status = ?"
. " where cwd = ?"
. " and user_id = ?"
. " and from_unixtime(time) > date_sub(from_unixtime(?), interval 2 hour)"
. " and directory_id = ?"
. " and cvsroot_id = ?")
}
$::update_temp_commitinfo{$db}->execute($status, $cwd, &id('user', $user), $time, &id('directory', $dir), &id('cvsroot', $cvsroot));
}
sub delete_commit {
my ($cwd, $user, $time, $dir, $cvsroot, $hashref) = @_;
my $db;
unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
$::dbh{$db}->do("delete from temp_commitinfo where cwd=" . $::dbh{$db}->quote($cwd)
. " and user_id=" . &id('user', $user)
. " and from_unixtime(time) > date_sub(from_unixtime($time), interval 2 hour)"
. " and directory_id=" . &id('directory', $dir)
. " and cvsroot_id=" . &id('cvsroot', $cvsroot));
}
sub log_performance {
my ($table, $ci_id, $time, $hashref) =@_;
my $db;
unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
$::dbh{$db}->do("insert into `$table` set checkin_id=" . $::dbh{$db}->quote($ci_id)
. ", time=$time");
}
sub store {
    # Serialize $value with Data::Dumper and insert it into $table's "value"
    # column, along with any extra columns given in %$other_ref (columns
    # named *_id are resolved through id() first).  $hashref may override
    # the target db ("db") and column name ("column").
    # NOTE(review): $column is looked up but the INSERT hard-codes "value" —
    # confirm whether the column override was ever meant to apply here.
    my ($table, $value, $other_ref, $hashref) = @_;
    my ($db, $column, $other, $stored_value, @bind);
    $other = '';    # avoid an uninitialized-value warning on the first .=
    unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
    unless ($column = ${$hashref}{"column"}) { $column = $::default_column }
    while (my ($col, $val) = each %$other_ref) {
        $other .= ", ".$col ." = ? ";
        if ($col =~ /.*_id$/) {
            $col =~ s/_id$//;
            push @bind, &id($col, $val);
        } else {
            push @bind, $val;
        }
    }
    $other .= " ";
    # BUG FIX: Data::Dumper is only a commented-out "use" at the top of this
    # file, so the original died at runtime with "Undefined subroutine
    # &main::Dumper".  Load it explicitly, and localize the configuration so
    # the Indent/Terse settings don't leak into callers.
    require Data::Dumper;
    local $Data::Dumper::Indent = 0;
    local $Data::Dumper::Terse  = 1;
    $stored_value = Data::Dumper::Dumper($value);
    $::dbh{$db}->do("insert into `$table` set value = ? $other", undef, $stored_value, @bind);
}
sub retrieve {
my ($table, $where_ref, $hashref) = @_;
my ($db, $column, $value, $where, @bind);
unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
unless ($column = ${$hashref}{"column"}) { $column = $::default_column }
while (my ($col, $val) = each %$where_ref) {
$where .= $col ." = ? AND ";
if ($col =~ /.*_id$/) {
$col =~ s/_id$//;
push @bind, &id($col, $val);
} else {
push @bind, $val;
}
}
$where .= "1";
$value = $::dbh{$db}->selectrow_array("SELECT $column FROM `$table` WHERE $where ORDER BY id DESC LIMIT 1", undef, @bind);
#print "SELECT $column FROM $table WHERE $where ORDER BY id DESC LIMIT 1", @bind;
#for my $i (@bind) { print "$i\n" }
return $value;
}
sub id {
    # Get-or-insert: look up the row id for $value in $table's value column,
    # inserting a new row when absent.  Prepared statements are cached in
    # %::get_id / %::insert_value / %::last_insert_id per db+table.
    # $hashref may override "db", "column", and "key".
    my ($table, $value, $hashref) = @_;
    my ($db, $column, $key, $id);
    unless ($db = ${$hashref}{"db"}) { $db = $::default_db }
    unless ($column = ${$hashref}{"column"}) { $column = $::default_column }
    unless ($key = ${$hashref}{"key"}) { $key = $::default_key }
    unless ($::get_id{$db}{$table}) { $::get_id{$db}{$table} = $::dbh{$db}->prepare("select $key from `$table` where $column = ?")}
    unless ($id = $::dbh{$db}->selectrow_array($::get_id{$db}{$table}, "", ($value))) {
        unless ($::insert_value{$db}{$table}) { $::insert_value{$db}{$table} = $::dbh{$db}->prepare("insert into `$table` ($column) values (?)") }
        unless ($::last_insert_id{$db}) { $::last_insert_id{$db} = $::dbh{$db}->prepare("select LAST_INSERT_ID()") }
        $::insert_value{$db}{$table}->execute($value);
        $id = $::dbh{$db}->selectrow_array($::last_insert_id{$db});
    }
    # BUG FIX: the original fell off the end and relied on the implicit
    # value of the final "unless" statement — make the return explicit.
    return $id;
}
sub connect {
    # Open (and cache in %::dbh) a handle for the named database profile
    # from %::db; falls back to $::default_db when no name is passed.
    # RaiseError is enabled, so connection failures die.
    my ($db) = @_;
    $db = $::default_db unless @_;
    $::dbh{$db} = DBI->connect($::db{$db}{'dsn'}, $::db{$db}{'user'}, $::db{$db}{'pass'}, { PrintError => 1, RaiseError => 1 })
}
sub disconnect {
    # Close the cached handle for the named (or default) database profile.
    my ($db) = @_;
    $db = $::default_db unless @_;
    $::dbh{$db}->disconnect();
}
sub debug_msg {
    # Print $msg when debugging is on ($::debug) and $level is within
    # $::debug_level; always returns the current epoch time.  Optional
    # $opts keys: showtime ('no'), timeformat ('local'), prefix ('#');
    # prefix 'none' suppresses the level marker.
    my ($msg, $level, $opts) = @_;
    my %now = (
        "epoch" => time,
        "local" => scalar localtime,
    );
    return $now{'epoch'} unless $::debug;
    my $showtime   = $opts->{"showtime"}   || 'no';
    my $timeformat = $opts->{"timeformat"} || 'local';
    my $prefix     = $opts->{"prefix"}     || '#';
    if ($::debug_level >= $level) {
        print $prefix x $level . " " unless $prefix =~ /none/i;
        print $now{$timeformat}." " unless $showtime =~ /no/i;
        print "$msg\n";
    }
    return $now{'epoch'};
}
sub get {
    # Pull one named field out of a CVS/Entries line already split on "/".
    # Field order follows the Entries format: code, file, rev, time, opt, tag.
    my ($item, @fields) = @_;
    my %index_of = (
        'code' => 0,
        'file' => 1,
        'rev'  => 2,
        'time' => 3,
        'opt'  => 4,
        'tag'  => 5,
    );
    my $pos = $index_of{$item};
    if ($item eq "tag") {
        # A missing tag means the trunk; stored tags carry a leading "T"
        # which is stripped before returning.
        $fields[$pos] = "TTRUNK" unless (defined $fields[$pos]);
        $fields[$pos] =~ s/^T//;
    }
    return $fields[$pos];
}
sub BuildModuleHash {
    # Parse the raw text of a CVSROOT/modules file into a hash ref of
    # { module_name => [ member, ... ] }.  Comments, blank lines, and
    # backslash continuations are normalized away first; flag arguments
    # other than -a/-l are discarded along with their values.
    my ($modules) = @_;
    my $modules_hash;
    chomp $modules;
    $modules =~ s/\s*#.*\n?/\n/g ; # remove commented lines
    $modules =~ s/^\s*(.*)/$1/ ; # remove blank lines before module definitions
    $modules =~ s/\s*\\\s*\n\s*/ /g ; # join lines continued with \
    $modules =~ s/\n\s+/\n/g ; # remove leading whitespace
    $modules =~ s/\s+-[^la]\s+\S+//g ; # drop flags (with arguments) other than 'a' and 'l'
    $modules =~ s/\s+-[la]\s+/ /g ; # drop bare 'a'/'l' flags **** FIXME: l needs an ending or something ****
    for my $entry (split(/\n/, $modules)) {
        my ($name, @members) = split(" ", $entry);
        $modules_hash->{$name} = [ @members ];
    }
    return $modules_hash;
}
# Return the unique elements of the argument list.  Order is NOT
# preserved (hash-key order).
sub uniq {
    my %seen;
    $seen{$_}++ for @_;
    return keys %seen;
}
# De-duplicate, in place, every value of %$hashref (each value is an
# arrayref).  Each list is replaced by a fresh arrayref of its unique
# elements; element order is NOT preserved, since uniq() returns hash
# keys.
# Fix: removed the outer "my ($key, $value);" declaration, which was
# immediately shadowed by the while-loop's own lexicals and triggered
# a "masks earlier declaration" warning under -w.
sub FlattenHash {
    my ($hashref) = @_;
    while (my ($key, $value) = each %$hashref) {
        $$hashref{$key} = [ &uniq(@$value) ];
    }
}
# Convert an expanded modules hash (module => list of member paths)
# into a new hash of anchored regexes via make_module_regex().  A
# member with a leading '!' is recorded as an "exclude" pattern; the
# '!' is stripped (note: the strip mutates the caller's list element,
# matching the original behaviour).
sub FormatModules {
    my ($hashref, $cvsroot) = @_;
    my %formatted;
    while (my ($module, $members) = each %$hashref) {
        for my $member (@$members) {
            my $kind = ($member =~ s/^!//) ? "exclude" : "include";
            &make_module_regex(\%formatted, $member, $kind, $module, $cvsroot);
        }
    }
    return \%formatted;
}
# Append one anchored match regex for $item to the module's pattern
# lists.  If $cvsroot/$item is a directory on disk the pattern matches
# anything beneath it (keyed "<type>_directory"); otherwise it matches
# the exact file path (keyed "<type>_file").  $type is "include" or
# "exclude".
sub make_module_regex {
    my ($hash, $item, $type, $module, $cvsroot) = @_;
    if (-d "$cvsroot/$item") {
        push @{ $$hash{$module}{ $type . "_directory" } }, "^\Q$item\E/.+\$";
    }
    else {
        # NOTE(review): the original also carried a commented-out
        # Attic/ rewrite here; files moved to the Attic will not match.
        push @{ $$hash{$module}{ $type . "_file" } }, "^\Q$item\E\$";
    }
}
# Rewrite, in place, the 'directory' and 'file' lists inside each
# mirror/overwrite/exclude section of every mirror rule into hashes of
# anchored "include_*" regexes (directories match everything beneath;
# files match exactly), de-duplicated via FlattenHash().  Other keys
# (e.g. 'module') are left untouched.
sub format_mirrorconfig {
    my ($mirrors) = @_;
    for my $rule (@$mirrors) {
        for my $section ("mirror", "overwrite", "exclude") {
            next unless $rule->{$section};
            while (my ($kind, $paths) = each %{ $rule->{$section} }) {
                next unless $kind eq "directory" || $kind eq "file";
                my %regexes;
                for my $path (@$paths) {
                    if ($kind eq "directory") {
                        push @{ $regexes{"include_" . $kind} }, "^\Q$path\E/.+\$";
                    }
                    else {
                        push @{ $regexes{"include_" . $kind} }, "^\Q$path\E\$";
                    }
                }
                &FlattenHash(\%regexes);
                $rule->{$section}->{$kind} = \%regexes;
            }
        }
    }
}
# Same transformation as format_mirrorconfig(), applied to the
# 'location' section of each access rule: 'directory' and 'file'
# lists become hashes of anchored "include_*" regexes, de-duplicated
# via FlattenHash().
sub format_accessconfig {
    my ($rules) = @_;
    for my $rule (@$rules) {
        next unless $rule->{'location'};
        while (my ($kind, $paths) = each %{ $rule->{'location'} }) {
            next unless $kind eq "directory" || $kind eq "file";
            my %regexes;
            for my $path (@$paths) {
                if ($kind eq "directory") {
                    push @{ $regexes{"include_" . $kind} }, "^\Q$path\E/.+\$";
                }
                else {
                    push @{ $regexes{"include_" . $kind} }, "^\Q$path\E\$";
                }
            }
            &FlattenHash(\%regexes);
            $rule->{'location'}->{$kind} = \%regexes;
        }
    }
}
# For every mirror rule, replace its 'module' name list with the
# merged include/exclude regex hash of those modules, as stored in the
# database by the CVSROOT/modules commit hook (the stored value is
# perl source; eval re-inflates it).
#
# Bug fix: the original guarded the merge with "if (\%n)" -- taking a
# reference to %n, which is always true -- leaving an invalid
# "\%n = ..." assignment in the (dead) else-branch.  Merging via push
# handles the first module and later modules identically, so the
# branch is simply removed.
sub expand_mirror_modules {
    my ($mirrors) = @_;
    my $modules_hashref;
    for my $m (@$mirrors) {
        my $r = $m->{'from'}->{'cvsroot'};
        # Expanded module definitions for this source repository.
        $modules_hashref->{$r} = eval &retrieve("modules", {"cvsroot_id" => $r});
        for my $t ("mirror", "overwrite", "exclude") {
            next unless $m->{$t};
            my $a = $m->{$t}->{'module'};
            next unless defined $a;
            my %n;
            for my $i (@$a) {
                # Merge this module's regex lists into the accumulator.
                while (my ($inc, $inc_array) = each %{$modules_hashref->{$r}->{$i}}) {
                    push @{$n{$inc}}, @$inc_array;
                }
            }
            &FlattenHash(\%n);
            $m->{$t}->{'module'} = \%n;
        }
    }
}
# Access-config counterpart of expand_mirror_modules(): replace each
# rule's location->{module} name list with the merged regex hash for
# those modules.  "#-all-#" is the wildcard cvsroot and has no stored
# modules, so no database lookup is made for it.
#
# Bug fix: same as expand_mirror_modules() -- the always-true
# "if (\%n)" guard and its invalid "\%n = ..." else-branch are
# replaced by an unconditional push-merge.
sub expand_access_modules {
    my ($aa) = @_;
    my $modules_hashref;
    for my $ah (@$aa) {
        next unless $ah->{'location'};
        my $r = $ah->{'cvsroot'};
        $modules_hashref->{$r} = eval &retrieve("modules", {"cvsroot_id" => $r}) unless $r eq "#-all-#";
        my $a = $ah->{'location'}->{'module'};
        next unless defined $a;
        my %n;
        for my $i (@$a) {
            while (my ($inc, $inc_array) = each %{$modules_hashref->{$r}->{$i}}) {
                push @{$n{$inc}}, @$inc_array;
            }
        }
        &FlattenHash(\%n);
        $ah->{'location'}->{'module'} = \%n;
    }
}
# Repeatedly substitute, in place, any element of a %$defs value list
# that is itself a key of %$lookup with that key's own list, until no
# element names a known key.  With a single argument the hash is
# expanded in terms of itself.  Does NOT terminate on circular
# definitions -- run CheckCircularity() first.
sub ExpandHash {
    my ($defs, $lookup) = @_;
    $lookup = $defs unless $lookup;
    my $changed = 1;
    while ($changed) {
        $changed = 0;
        for my $name (keys %$defs) {
            # The index range is fixed before any splice; elements the
            # splice shifts past already-visited slots are picked up on
            # the next full pass.
            for my $idx (0 .. $#{ $defs->{$name} }) {
                my $element = $defs->{$name}[$idx];
                if (exists $lookup->{$element}) {
                    $changed = 1;
                    splice @{ $defs->{$name} }, $idx, 1, @{ $lookup->{$element} };
                }
            }
        }
    }
}
# Detect circular module definitions in %$hash (module name =>
# arrayref of member names).  Attempts a topological-style rotation of
# the definitions; when a full rotation happens without progress the
# entries rotated so far form a cycle, which is printed before the
# program exits with status 1.  On success nothing useful is returned
# and the hash is untouched.
sub CheckCircularity {
my $hash = $_[0] ;
# Parallel arrays: @LHS holds the module names, @RHS the colon-joined
# member list of each module, so membership can be tested with one
# regex below.
my @LHS ;
my @RHS ;
my $count = 0 ;
for my $k (keys(%$hash)) {
$LHS[$count]=$k ;
$RHS[$count]=join(':', @{$hash->{$k}}) ;
$count++ ;
}
#---------------------------------------------#
# check for, and report, circular references  #
#---------------------------------------------#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#for (my $i=0; $i<=$#LHS; ++$i) {print "$LHS[$i] = $RHS[$i]\n";} #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# $sort_count tracks how many consecutive rotations happened without
# retiring an entry; $unsorted_end marks the boundary of the region
# still being sorted.
my $sort_count = 0 ;
my $unsorted_end = $#RHS ;
# Scan from the end: when entry $i is referenced by some entry at or
# before it, rotate $i to the front and restart the scan (goto SORT).
SORT: for (my $i=$unsorted_end; $i>=0; --$i) {
my $search_name = $LHS[$i] ;
for (my $j=$i; $j>=0; --$j) {
# Match $search_name as a whole member at the start, middle, or end
# of the colon-joined list (or as the only member).
if ($RHS[$j] =~ /^$search_name:|:$search_name:|:$search_name$|^$search_name$/){
unshift @LHS, $LHS[$i] ;
unshift @RHS, $RHS[$i] ;
splice @LHS, $i+1, 1 ;
splice @RHS, $i+1, 1 ;
++$sort_count ;
# A full rotation of the first $i+1 entries without progress means
# every one of them is referenced by another: a cycle.
if ($sort_count == $i+1) {
print "\ncircular reference involving the following:\n\n" ;
print "\t$LHS[0]" ;
for my $x (1..$i) { print " : $LHS[$x]" }
print "\n" ;
for my $x (0..$i) {
$RHS[$x] =~ s/:/ & /g ;
print "\n\t$LHS[$x] --> $RHS[$x]" ;
}
print "\n\nyou suck, try again.\n\n" ;
exit 1;
}
goto SORT ;
}
}
# Entry $unsorted_end is referenced by nobody: retire it and reset
# the no-progress counter.
--$unsorted_end ;
$sort_count = 0 ;
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#print "\n";                                                     #
#for (my $i=0; $i<=$#LHS; ++$i) {print "$LHS[$i] = $RHS[$i]\n";} #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
}
# True (1) when the commit log text matches any pattern in the global
# @::mirrored_checkin_reqex [sic], i.e. this checkin was itself
# produced by the mirroring machinery and must not be mirrored again;
# 0 otherwise.
sub mirrored_checkin {
    my ($log) = @_;
    for my $pattern (@::mirrored_checkin_reqex) {
        return 1 if $log =~ $pattern;
    }
    return 0;
}
# True (1) when the log directive sounds like "nomirror" (soundex
# comparison, so misspellings such as "nomiror" also count);
# 0 otherwise.  The directive word is pulled out of decorations like
# "#-nomirror-#"; anything unrecognised becomes "none".
sub nomirrored {
    my ($directive) = @_;
    if ($directive =~ /[#-]\s*[#-]([^-#]+)[#-]?\s*[#-]?/) {
        $directive = $1;
    }
    else {
        $directive = "none";
    }
    # NOTE(review): $directive is always truthy here (the fallback is
    # the string "none"), so this diagnostic always prints -- confirm
    # that is intended.
    print "directive --> $directive (", soundex($directive), ")\n" if $directive;
    return 1 if soundex($directive) eq soundex("nomirror");
    return 0;
}
# Seed the mirroring recursion: build one "branch object" per branch
# that saw changes, keyed "branch@cvsroot@offset" (offset empty for
# the originating checkin), then let make_mirror() fan the changes out
# through the configured mirrors and record the result in the
# database via insert_mirror_object().
# Cleanup: removed the unused @bro_array lexical; $mirror_ref is still
# accepted (callers pass it) but is unused here -- make_mirror() reads
# the global $::mirror instead.
sub create_mirrors {
    my ($change_ref, $mirror_ref) = @_;
    my %bro_hash;
    while (my ($from, $branch_change_ref) = each %$change_ref) {
        my $start_bro = $from . "@" . $::cvsroot . "@";
        $bro_hash{$start_bro} = {
            'start'  => $start_bro,
            'files'  => [ keys %$branch_change_ref ],
            'offset' => '',
        };
    }
    &make_mirror(\%bro_hash);
    &insert_mirror_object;
}
# Recursive core of mirror fan-out.  %$hash maps a "branch object"
# key "branch@cvsroot@offset" to {start, files, offset}.  For every
# configured mirror rule whose 'from' matches the object, each file is
# classified by check_mirror(); overwrite/mirror results are recorded
# in the global %::mirror_object (keyed by target object and change
# id, via the global $::ch_id_ref file->change-id map), and a new
# child object is queued in %sub so mirrors-of-mirrors are followed.
# The 'start' object is carried through so a chain never mirrors back
# onto its origin.  Recurses until no new targets appear.
sub make_mirror {
my ($hash) = @_;
my (%sub);
# Precedence of classifications: a stronger type may overwrite a
# weaker one already recorded for the same change.
my %x = ("exclude" => 0, "overwrite" => 1, "mirror" => 2);
while (my ($bro, $details) = each %$hash) {
# Key format is "branch@cvsroot@offset[@parent...]"; trailing parts
# (the parent chain) are ignored here.
my ($bro_branch, $bro_root, $bro_offset, @old_bro) = split ("@", $bro);
$bro_offset = "" unless $bro_offset;
my $new_bro = $bro_branch."@".$bro_root."@".$bro_offset;
for my $m (@$::mirror) {
for my $to (@{$m->{to}}) {
my @sub_files;
if ($m->{from}->{branch} eq $bro_branch && $m->{from}->{cvsroot} eq $bro_root) {
# Fold this hop's path rewrite into the offset accumulated so far.
my $adjusted_offset = &adjust_offset($bro_offset, $to->{'offset'});
my $sub_bro = "$to->{branch}\@$to->{cvsroot}\@$adjusted_offset";
# Never mirror back onto the object that started this chain.
if ($sub_bro ne $details->{'start'}) {
for my $f (@{$details->{'files'}}) {
my $type = &check_mirror($f, $m, $details->{'offset'});
if ($type && $x{$type} > 0) {
# First time this change reaches this target: queue the file
# on the child object for the next recursion level.
unless (defined $::mirror_object{$sub_bro}->{$::ch_id_ref->{$f}}) {
push @{$sub{$sub_bro."@".$new_bro}->{'files'}}, $f;
}
# Record the classification unless a stronger one is already
# present for this target/change pair.
unless ( defined $::mirror_object{$sub_bro}->{$::ch_id_ref->{$f}} &&
$x{$type} < $x{$::mirror_object{$sub_bro}->{$::ch_id_ref->{$f}}} ) {
$::mirror_object{$sub_bro}->{$::ch_id_ref->{$f}} = $type;
}
}
}
# Only flesh out the child object if at least one file was queued.
if (defined $sub{$sub_bro."@".$new_bro}) {
$sub{$sub_bro."@".$new_bro}{'start'} = $details->{'start'};
$sub{$sub_bro."@".$new_bro}{'offset'} = $adjusted_offset;
}
}
}
}
}
}
#	print "SUB-" x 19, "SUB\n", Dumper(\%sub), "SUB-" x 19, "SUB\n";
#	print "\n###\n### Making more mirrors\n###\n" if %sub;
#	if ($::COUNT++ < 100) {
#		print "$::COUNT-" x 39, "$::COUNT\n", Dumper(\%::mirror_object);
&make_mirror(\%sub) if %sub;
#	}
}
# Combine the path-rewrite offset accumulated so far ($prev) with a
# mirror target's own offset ($next).  Offsets are "from|to" pairs;
# shorten() cancels the overlapping portions so a chain of mirrors
# does not re-apply the same rewrite twice.  Returns "" when the two
# offsets cancel completely.
sub adjust_offset {
    my ($prev, $next) = @_;
    my ($prev_from, $prev_to) = split /\|/, $prev;
    my ($next_from, $next_to) = split /\|/, $next;
    # Cancel the tail of $prev_from against the tail of $next_to, and
    # the head of $prev_to against the head of $next_from.
    ($prev_from, $next_to) = &shorten($prev_from, $next_to, 1);
    ($prev_to, $next_from) = &shorten($prev_to, $next_from, 0);
    my $combined = $prev_from . $next_from . "|" . $next_to . $prev_to;
    return $combined ne "|" ? $combined : "";
}
# Strip the common overlap from two path fragments.  The shorter
# fragment, quotemeta'd and anchored at the end ($anchor_end true) or
# the start ($anchor_end false), is removed from BOTH fragments -- but
# only when it matches both.  Undefined or false inputs are treated as
# "".  Returns the (possibly shortened) pair.
sub shorten {
    my ($left, $right, $anchor_end) = @_;
    $left = '' unless $left;
    $right = '' unless $right;
    my $overlap = length $left < length $right ? $left : $right;
    my $pattern = $anchor_end ? "\Q$overlap\E\$" : "^\Q$overlap\E";
    if ($left =~ /$pattern/ && $right =~ /$pattern/) {
        $left =~ s/$pattern//;
        $right =~ s/$pattern//;
    }
    return ($left, $right);
}
# Classify $file against one mirror rule.  Each section present in
# the rule is tested in escalating order, so the LAST matching section
# wins ("exclude" strongest).  Returns "mirror", "overwrite",
# "exclude", or undef when no section matches.
sub check_mirror {
    my ($file, $rule, $offset) = @_;
    my $result;
    for my $section ("mirror", "overwrite", "exclude") {
        next unless defined $rule->{$section};
        $result = $section if &allowed($file, $rule->{$section}, $offset);
    }
    return $result;
}
# Decide whether $file falls inside one section of a mirror rule.
# Sub-sections are consulted in order file, directory, module; within
# each, the first regex list that matches decides the answer: an
# "exclude_*" match returns 0, an "include_*" match returns 1.  No
# match at all returns 0.
sub allowed {
    my ($file, $section, $offset) = @_;
    my %verdict_of = ('exclude' => 0, 'include' => 1);
    for my $sub_section ("file", "directory", "module") {
        next unless defined $section->{$sub_section};
        for my $kind ("file", "directory") {
            for my $clude ("exclude", "include") {
                my $patterns = $section->{$sub_section}->{$clude . "_" . $kind};
                next unless defined $patterns;
                return $verdict_of{$clude} if &match_array($file, $patterns, $offset);
            }
        }
    }
    return 0;
}
# True (1) when $file -- prefixed with the global $::directory when
# set, and rewritten through the "from|to" $offset pair -- matches any
# regex in @$regex_arrayref; 0 otherwise.
#
# Bug fix: with an empty or degenerate offset the original
# interpolated an empty pattern into s///, which Perl treats as
# "reuse the last successful pattern" -- an unpredictable
# substitution.  The rewrite is now applied only when the from-part is
# non-empty.
sub match_array {
    my ($file, $regex_arrayref, $offset) = @_;
    $file = $::directory . "/" . $file if $::directory;
    $offset = "|" unless $offset;
    my ($from_offset, $to_offset) = split /\|/, $offset;
    $from_offset = '' unless defined $from_offset;
    $to_offset = '' unless defined $to_offset;
    # Translate the path into the target tree's layout before matching.
    $file =~ s/\Q$from_offset\E/$to_offset/ if length $from_offset;
    for my $r (@$regex_arrayref) {
        return 1 if $file =~ /$r/;
    }
    return 0;
}
return 1;

View File

@@ -1,181 +0,0 @@
#!/usr/local/bin/perl -w
use strict;
use Sys::Hostname;
use Cwd;
use Data::Dumper;
use Time::HiRes;
#use Storable qw(lock_store lock_retrieve);
use vars qw($ch_id_ref);
#
# It's tempting to use environment variables for things like USER
# and CVSROOT; however, don't. Using the builtin CVS variables is
# a better idea, especially if you are using a three entry
# $CVSROOT/CVSROOT/passwd (i.e., cvs runs as a local user instead of
# the actual user)
#
$::cvsrootdir = shift @ARGV;
#
# I'd really rather have all my "use" and "require" statements before
# anything else, but since I want to keep the bonsai-global.pm module
# checked into $CVSROOT/CVSROOT, I need to do the ugly "parse" the
# root first, then require the module foo you see here.
#
require "$::cvsrootdir/CVSROOT/bonsai-global.pm";
require "$::cvsrootdir/CVSROOT/bonsai-config.pm";
$::start = Time::HiRes::time;
$::cwd = cwd;
$::user = shift @ARGV;
$::time = time;
$::directory = shift @ARGV ;
#$::directory =~ s/^\"(.*)\"$/$1/;
$::cvsroot = hostname() . ":" . $::cvsrootdir;
#print "CWD: $::cwd\n### USER: $::user\n### TIME: $::time\n### DIR: $::directory\n### CVSROOT: $::cvsroot\n";
$::log = 0;
while (<>) {
#print " -- $_";
if (/^Log Message:$/) {
$::log = 1;
next;
}
next until $::log;
$::logtext .= $_;
#print " ---- $_";
}
$::logtext =~ s/[\s\n]*$//;
#-debug-# &debug_msg("LOG: $::logtext", 3);
if (-e "CVS/Entries") { # if block for first intermodule mirrored add of a new directory
open (ENTRIES, "<CVS/Entries") || die "Can't open CVS/Entries" ;
while (<ENTRIES>) {
chomp;
#-debug-# print "CVS/Entries: $_\n";
my @line = split /\//;
next if &get('code', @line);
my $file = &get('file', @line);
my $branch = &get('tag', @line);
$::change_ref->{$branch}->{$file}->{'old'} = &get('rev', @line);
$::change_ref->{$branch}->{$file}->{'old'} =~ s/^-//;
undef $file;
undef $branch;
undef @line;
}
close ENTRIES;
}
if (-e "CVS/Entries.Log") { # if block for directory adds since CVS/Entries.log doesn't get created for directory adds
open (ENTRIES, "<CVS/Entries.Log") || die "Can't open CVS/Entries.Log" ;
while (<ENTRIES>) {
chomp;
#-debug-# print "CVS/Entries.log: $_\n";
my @line = split /\//;
next if (&get('code', @line) eq 'A D'); # the if block isn't enough, this covers cvs add foo foo/* foo/*/* ...
my $file = &get('file', @line);
my $branch = &get('tag', @line);
$::change_ref->{$branch}->{$file}->{'new'} = &get('rev', @line);
# $::change_ref->{$branch}->{$file}->{'new'} =~ s/^-[1-9][0-9\.]+$/NONE/;
$::change_ref->{$branch}->{$file}->{'new'} =~ s/^-[1-9][0-9\.]+$/0/;
undef $file;
undef $branch;
undef @line;
}
close ENTRIES;
}
&collapse_HOHOH($::change_ref, ['new', 'old']);
&connect();
$::mirror = eval &retrieve("expanded_mirrorconfig");
#print "\$expanded_mirrorconfig",Dumper($::mirror);
###
### directory/file specific actions
###
if ($::directory eq "CVSROOT") {
my $modulesfile = "./modules";
if (-e $modulesfile ) { # create an expanded modules file and store it in the database
my $modules = `cat $modulesfile`;
my $modules_hash = &BuildModuleHash($modules) ;
&ExpandHash($modules_hash); # Expand the modules file in terms of itself
&FlattenHash($modules_hash); # Remove dulicate entries from expansion
my $formatted_modules = &FormatModules($modules_hash, $::cvsrootdir); # Convert to regexs suitable for matching
&store("modules", $formatted_modules, {"cvsroot_id" => $::cvsroot, "rev" => $::change_ref->{'TRUNK'}->{'modules'}->{'new'}});
#
# update expanded_mirrorconfig & expanded_accessconfig
#
unless ($::change_ref->{'TRUNK'}->{'bonsai-mirrorconfig.pm'} &&
$::change_ref->{'TRUNK'}->{'bonsai-mirrorconfig.pm'}->{'new'}) {
my $mc = eval &retrieve("mirrorconfig");
&expand_mirror_modules($mc);
&store("expanded_mirrorconfig", $mc);
}
unless ($::change_ref->{'TRUNK'}->{'bonsai-accessconfig.pm'} &&
$::change_ref->{'TRUNK'}->{'bonsai-accessconfig.pm'}->{'new'}) {
my $ac = eval &retrieve("accessconfig");
&expand_access_modules($ac);
&store("expanded_accessconfig", $ac);
}
}
if ($::change_ref->{'TRUNK'}->{'bonsai-mirrorconfig.pm'} &&
$::change_ref->{'TRUNK'}->{'bonsai-mirrorconfig.pm'}->{'new'}) {
require "./bonsai-mirrorconfig.pm";
#print Dumper($::mirrorconfig), "#" x 80, "\n";
&format_mirrorconfig($::mirrorconfig); # Convert to regexs suitable for matching
#print Dumper($::mirrorconfig), "#" x 80, "\n";
&store("mirrorconfig", $::mirrorconfig, {"cvsroot_id" => $::cvsroot, "rev" => $::change_ref->{'TRUNK'}->{'bonsai-mirrorconfig.pm'}->{'new'}});
#
# update expanded_mirrorconfig
#
#$Data::Dumper::Indent=2;
#$Data::Dumper::Terse=0;
#print Dumper($::mirrorconfig), "#" x 80, "\n";
&expand_mirror_modules($::mirrorconfig);
#print Dumper($::mirrorconfig);
&store("expanded_mirrorconfig", $::mirrorconfig);
}
if ($::change_ref->{'TRUNK'}->{'bonsai-accessconfig.pm'} &&
$::change_ref->{'TRUNK'}->{'bonsai-accessconfig.pm'}->{'new'}) {
require "./bonsai-accessconfig.pm";
#print Dumper($::accessconfig), "#" x 80, "\n";
&format_accessconfig($::accessconfig); # Convert to regexs suitable for matching
&store("accessconfig", $::accessconfig, {"cvsroot_id" => $::cvsroot, "rev" => $::change_ref->{'TRUNK'}->{'bonsai-accessconfig.pm'}->{'new'}});
#
# update expanded_accessconfig
#
#$Data::Dumper::Indent=2;
#$Data::Dumper::Terse=0;
#print Dumper($::accessconfig), "#" x 80, "\n";
&expand_access_modules($::accessconfig);
#print Dumper($::accessconfig);
&store("expanded_accessconfig", $::accessconfig);
}
}
###
### Create checkin and mirror objects in database
###
#-debug-# &debug_msg("logging checkins in $::directory to database...", 1);
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'creating checkin object');
($::id, $::ch_id_ref) = &checkin($::cwd, $::user, $::time, $::directory, $::cvsroot, $::logtext, $::change_ref);
#print Dumper($::change_ref);
#-debug-# &debug_msg("\ncheckin id: $::id\n", 0, { prefix => 'none' });
unless (&mirrored_checkin($::logtext)) {
#print "\n--> creating mirror objects <--\n\n";
#unless (&mirrored_checkin($::logtext) || &nomirrored($::logtext)) {
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'creating mirror object(s)');
&create_mirrors($::change_ref, $::mirror);
}
#print "FINAL-" x 12, "FINAL\n", Dumper(\%::mirror_object) if %::mirror_object;
&update_commit($::cwd, $::user, $::time, $::directory, $::cvsroot, 'checkin complete');
&delete_commit($::cwd, $::user, $::time, $::directory, $::cvsroot);
&log_performance("loginfo_performance", $::id, Time::HiRes::time - $::start);
&disconnect();
#while (my ($file, $change_id) = each %$::ch_id_ref) { print "--> $change_id -- $file\n" }

View File

@@ -1,398 +0,0 @@
#!/usr/local/bin/perl -w
#use diagnostics;
use strict;
$::mirrorconfig = [
{
'from' => {
'branch' => 'B1',
'cvsroot' => 'neutron:/var/cvs',
},
'to' => [
{
'branch' => 'T1',
'cvsroot' => 'neutron:/var/cvs',
'offset' => '',
},
],
'mirror' => {
'directory' => [ 'mirror-test' ],
},
},
{
'from' => {
'branch' => 'T1',
'cvsroot' => 'neutron:/var/cvs',
},
'to' => [
{
'branch' => 'TRUNK',
'cvsroot' => 'neutron:/var/cvs',
'offset' => 'mirror-test/|modules/mirror-test/foo/',
},
],
'mirror' => {
'directory' => [ 'mirror-test' ],
},
},
{
'from' => {
'branch' => 'TRUNK',
'cvsroot' => 'neutron:/var/cvs',
},
'to' => [
{
'branch' => 'T2',
'cvsroot' => 'neutron:/var/cvs',
'offset' => '',
},
],
'mirror' => {
'directory' => [ 'modules2' ],
},
},
{
'from' => {
'branch' => 'TRUNK',
'cvsroot' => 'neutron:/var/cvs',
},
'to' => [
{
'branch' => 'TRUNK',
'cvsroot' => 'neutron:/var/cvs',
'offset' => 'modules/|modules2/',
},
],
'mirror' => {
'directory' => [ 'modules/mirror-test' ],
},
},
#{
# 'from' => {
# 'branch' => 'BMS_REL_3_0_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'BMS_REL_3_1_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# },
# {
# 'branch' => 'BMS_REL_3_2_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# },
# {
# 'branch' => 'BMS_REL_4_0_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# },
# {
# 'branch' => 'TRUNK',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'directory' => [
# 'tools/config'
# ]
# },
# 'exclude' => {
# 'file' => [
# 'tools/config/classpath_solaris',
# 'tools/config/classpath_nt'
# ]
# },
#},
#{
# 'from' => {
# 'branch' => 'BMS_REL_3_1_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'BMS_REL_3_2_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools'
# ]
# },
#},
#{
# 'from' => {
# 'branch' => 'BMS_REL_4_0_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'BMS_REL_4_0_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools'
# ]
# },
# 'exclude' => {
# 'directory' => [
# 'demo',
# 'projects/config',
# 'bmsrc/packages/com/bluemartini/automation',
# 'projects/automation',
# ],
# },
#},
#{
# 'from' => {
# 'branch' => 'BMS_REL_4_1_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'TRUNK',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'directory' => [
# 'translation'
# ]
# },
# 'exclude' => {
# 'directory' => [ 'bmsrc/apps/ams' ],
# },
#},
#{
# 'from' => {
# 'branch' => 'BMS_REL_4_1_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'Marvin_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools'
# ]
# },
# 'exclude' => {
# 'directory' => [ 'bmsrc/apps/ams' ],
# },
#},
#{
# 'from' => {
# 'branch' => 'BMS_REL_4_1_M_1_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'Marvin_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools',
# 'BMInstall'
# ]
# },
#},
#{
# 'from' => {
# 'branch' => 'BMS_REL_4_1_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'TRUNK',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools',
# 'BMInstall'
# ]
# },
# 'exclude' => {
# 'directory' => [
# 'demo',
# 'bmsrc/packages/com/bluemartini/automation',
# 'projects/automation',
# 'bmsrc/apps/ams'
# ]
# },
#},
#{
# 'from' => {
# 'branch' => 'Stanford_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'TRUNK',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools',
# 'BMInstall'
# ]
# },
# 'exclude' => {
# 'directory' => [
# 'demo',
# 'bmsrc/packages/com/bluemartini/automation',
# 'projects/automation'
# ]
# },
#},
#{
# 'from' => {
# 'branch' => 'TRUNK',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'db2_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools',
# 'BMInstall'
# ],
## 'file' => [
## 'test/foo.sh'
## ],
## 'directory' => [
## 'CVSROOT',
## ]
# },
# 'exclude' => {
# 'directory' => [
# 'demo'
# ],
## 'file' => [
## 'makefile'
## ],
# },
#},
##{
## 'from' => {
## 'branch' => 'TRUNK',
## 'cvsroot' => 'neutron:/var/cvs',
## },
## 'to' => [
## {
## 'branch' => 'incognitus_Dev_BRANCH',
## 'cvsroot' => 'neutron:/var/cvs',
## 'offset' => '',
## }
## ],
## 'mirror' => {
## 'module' => [
## 'Vermouth',
## 'BMTools',
## 'BMInstall'
## ]
## },
## 'exclude' => {
## 'directory' => [
## 'demo'
## ]
## },
##},
#{
# 'from' => {
# 'branch' => 'db2_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'incognitus_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'Vermouth',
# 'BMTools',
# 'BMInstall'
# ],
## 'directory' => [
## 'CVSROOT',
## ]
# },
# 'exclude' => {
# 'directory' => [
# 'demo'
# ],
# 'file' => [
# 'makefile'
# ]
# },
#},
##
## testing foo below
##
#{
# 'from' => {
# 'branch' => 'TRUNK',
# 'cvsroot' => 'neutron:/var/cvs',
# },
# 'to' => [
# {
# 'branch' => 'Test2_Dev_BRANCH',
# 'cvsroot' => 'neutron:/var/cvs',
# 'offset' => '',
# }
# ],
# 'mirror' => {
# 'module' => [
# 'thj'
# ],
# 'directory' => [
# 'test'
# ],
# 'file' => [
# 'test/foo.sh',
# ],
# },
# 'exclude' => {
## 'directory' => [
## 'demo'
## ],
# 'file' => [
# 'test/foo.sh',
# ],
# },
#}
];
return 1;

View File

@@ -1,4 +0,0 @@
bonsai-config.pm
bonsai-global.pm
bonsai-loginfo.pl
bonsai-commitinfo.pl

View File

@@ -1 +0,0 @@
ALL $CVSROOT/CVSROOT/bonsai-commitinfo.pl ${CVSROOT} ${USER}

View File

@@ -1 +0,0 @@
ALL $CVSROOT/CVSROOT/bonsai-loginfo.pl ${CVSROOT} ${USER} %{}

View File

@@ -1,8 +0,0 @@
This directory contains a work in progress. There is currently no documentation, and it is
almost guaranteed not to work on your system.
Therefore, I'd advise you to just pretend that it's not here for now.
Really.
11.1.02

View File

@@ -1,39 +0,0 @@
package DB::Insert;
use DBI;
use strict;
# Insert one row into exec_log recording a command execution.  @_ is
# bound positionally to the placeholders: (command, stdout, stderr,
# exit_value, signal_num, dumped_core) -- callers must pass exactly
# those six values, in that order.  Returns the new row's id.
sub exec_log {
#use Data::Dumper;
#print Dumper(\@_);
$::dbh->do("
INSERT INTO
`exec_log`
SET
time = UNIX_TIMESTAMP(),
command = ?,
stdout = ?,
stderr = ?,
exit_value = ?,
signal_num = ?,
dumped_core = ?
", undef, @_);
# LAST_INSERT_ID() is per-connection, so this is safe on the shared
# $::dbh handle as long as no other insert intervenes on it.
return $::dbh->selectrow_array("SELECT LAST_INSERT_ID()");
}
# Link an exec_log row to the mirror/change pair it was run for.  @_
# is bound positionally: (mirror_id, change_id, exec_log_id).  No
# return value of interest.
sub mirror_change_exec_map {
#use Data::Dumper;
#print Dumper(\@_);
$::dbh->do("
INSERT INTO
`mirror_change_exec_map`
SET
mirror_id = ?,
change_id = ?,
exec_log_id = ?
", undef, @_);
}
return 1;
__END__

View File

@@ -1,142 +0,0 @@
package DB::Select;
use DBI;
use strict;
use Sys::Hostname;
sub mirrors {
my $sth = $::dbh->prepare("
SELECT
m.id, m.checkin_id, b.value, r.value, o.value
FROM
checkin c, mirror m, branch b, cvsroot r, offset o, status s
WHERE
c.id = m.checkin_id
AND b.id = m.branch_id
AND r.id = m.cvsroot_id
AND o.id = m.offset_id
AND s.id = m.status_id
AND c.time < ? - ?
AND s.value = ?
AND r.value RLIKE ?
ORDER BY
c.time, checkin_id, m.id
");
my $arrayref = $::dbh->selectall_arrayref(
$sth,
undef,
time,
$::mirror_delay,
shift,
'^' . Sys::Hostname::hostname() . ':.*$'
);
$sth->finish();
return $arrayref;
}
sub checkin {
my $sth = $::dbh->prepare("
SELECT
u.value as user, d.value as directory, l.value as log, r.value as cvsroot
FROM
checkin c, user u, directory d, log l, cvsroot r
WHERE
u.id = c.user_id
AND d.id = c.directory_id
AND l.id = c.log_id
AND r.id = c.cvsroot_id
AND c.id = ?
LIMIT 1
");
$sth->execute(shift);
my $hashref = $sth->fetchrow_hashref();
$sth->finish();
return $hashref;
}
sub change {
my $sth = $::dbh->prepare("
SELECT
f.value as file, ch.oldrev, ch.newrev, b.value as branch
FROM
`change` ch, file f, branch b
WHERE
f.id = ch.file_id
AND b.id = ch.branch_id
AND ch.id = ?
LIMIT 1
");
$sth->execute(shift);
my $hashref = $sth->fetchrow_hashref();
$sth->finish();
return $hashref;
}
sub mirror_changes {
my $sth = $::dbh->prepare("
SELECT
mcm.change_id, t.value as type
FROM
mirror_change_map mcm, type t, status s
WHERE
t.id = mcm.type_id
AND s.id = mcm.status_id
AND s.value = ?
AND mcm.mirror_id = ?
");
my $arrayref = $::dbh->selectall_arrayref(
$sth,
undef,
@_
);
$sth->finish();
return $arrayref;
}
sub runtime {
my $sth = $::dbh->prepare("
SELECT
c.value as command,
ri.mirror_delay,
ri.min_scan_time,
ri.throttle_time,
ri.max_addcheckins,
ri.last_update,
ri.mh_command_response as response,
ri.id
FROM
mh_runtime_info ri, mh_command c
WHERE
ri.mh_hostname_id = ?
AND ri.mh_command_id = c.id
ORDER BY
ri.id DESC,
ri.time DESC
LIMIT 1
");
$sth->execute(shift);
my $hashref = $sth->fetchrow_hashref();
$sth->finish();
return $hashref;
}
sub branch_eol {
my ($r, @ba) = @_;
return $::dbh->selectcol_arrayref("
SELECT
b.value
FROM
`cvsroot_branch_map_eol` m, `cvsroot` r, `branch` b
WHERE
r.id = m.cvsroot_id
AND b.id = m.branch_id
AND (b.value = ?" . (" OR b.value = ?" x $#ba) . ")
",
undef,
@ba
);
}
return 1;
__END__

View File

@@ -1,70 +0,0 @@
package DB::Update;
use DBI;
use strict;
use DB::Util;
BEGIN {
}
# Set the status of mirror row $id to the named $status value.  The
# status string is resolved to its id through the status lookup table
# read-only, so an unknown status name dies rather than creating a row.
sub mirror {
#use Data::Dumper;
#print Dumper(\@_);
my ($id, $status) = @_;
$::dbh->do("
UPDATE
`mirror`
SET
status_id = ?
WHERE
id = ?
",
undef,
&DB::Util::id('status', $status, {'read_only' => 1}),
$id
);
}
# Set the status of one (mirror_id, change_id) pair in
# mirror_change_map to the named $status value (resolved read-only via
# the status lookup table; unknown names die).
sub mirror_change {
#use Data::Dumper;
#print Dumper(\@_);
my ($mid, $chid, $status) = @_;
$::dbh->do("
UPDATE
`mirror_change_map`
SET
status_id = ?
WHERE
mirror_id = ?
AND change_id = ?
",
undef,
&DB::Util::id('status', $status, {'read_only' => 1}),
$mid,
$chid
);
}
# Acknowledge a runtime command: store $r + 1 as the
# mh_command_response of mh_runtime_info row $id.
# NOTE(review): $r is presumably the response value previously read
# back by DB::Select::runtime; incrementing it signals the command was
# seen -- confirm against the mirror-handler loop.
sub runtime {
#use Data::Dumper;
#print Dumper(\@_);
my ($r, $id) = @_;
$r++;
$::dbh->do("
UPDATE
`mh_runtime_info`
SET
mh_command_response = ?
WHERE
id = ?
",
undef,
$r,
$id
);
}
return 1;
__END__

View File

@@ -1,66 +0,0 @@
package DB::Util;
use DBI;
use strict;
# Connect the shared $::dbh handle using the profile selected by
# $default::db in the global %::db table, unless a live connection
# already exists.  RaiseError is on, so a failed connect dies.
# NOTE(review): on the very first call $::dbh is undef; dereferencing
# it autovivifies an ordinary hashref whose 'Active' is false, so the
# connect still happens -- but the check relies on that quirk.
sub connect {
unless ($::dbh->{'Active'}) {
$::dbh = DBI->connect(
$::db{$default::db}{'dsn'},
$::db{$default::db}{'user'},
$::db{$default::db}{'pass'},
{
PrintError => 1,
RaiseError => 1
}
);
}
}
# Close the shared $::dbh handle, but only when it holds a live
# connection.
sub disconnect {
    $::dbh->disconnect() if $::dbh->{'Active'};
}
# Look up (and, unless read-only, create) the id of $value in lookup
# table $table.  Options in $hashref:
#   column    - value column name (default $default::column)
#   key       - id column name (default $default::key)
#   read_only - when true, die instead of inserting a missing value
# Returns the id.
# NOTE(review): $table/$column/$key are interpolated directly into the
# SQL -- they must never come from untrusted input; only $value is
# bound as a placeholder.
sub id {
#use Data::Dumper;
#print Dumper(\@_);
my ($table, $value, $hashref) = @_;
my ($column, $key, $ro, $id);
unless ($column = ${$hashref}{"column"}) { $column = $default::column }
unless ($key = ${$hashref}{"key"}) { $key = $default::key }
unless ($ro = ${$hashref}{"read_only"}) { $ro = 0 }
unless ($id = $::dbh->selectrow_array("SELECT $key FROM `$table` WHERE $column = ?", undef, $value)) {
unless ($ro) {
$::dbh->do("INSERT INTO `$table` SET $column = ?", undef, $value);
# Same connection, so LAST_INSERT_ID() returns the row just inserted.
$id = $::dbh->selectrow_array("SELECT LAST_INSERT_ID()");
} else {
die "\nThe value \"$value\" was not found in column \"$column\" of table \"$table\" during a read-only ID lookup operation.\n\n";
}
}
#-debug-# print "\n$id\n\n";
return $id;
}
# Fetch the most recent value of $column (default $default::column)
# from `$table`, filtered by the column=>value pairs in %$where_ref.
# A filter column ending in "_id" is translated: the human-readable
# value is resolved to its id through the matching lookup table (the
# column name minus "_id") via id().  Returns the value, or undef when
# no row matches.
# Fix: $where is now initialised to '' -- the original appended to an
# undefined scalar, emitting an uninitialized-value warning under -w.
# NOTE(review): $table, $column, and the filter column names are
# interpolated into the SQL; only the values are bound -- callers must
# not pass untrusted names.
sub retrieve {
    my ($table, $where_ref, $hashref) = @_;
    my ($column, $value, @bind);
    my $where = '';
    unless ($column = ${$hashref}{"column"}) { $column = $default::column }
    while (my ($col, $val) = each %$where_ref) {
        $where .= $col . " = ? AND ";
        if ($col =~ /.*_id$/) {
            # Foreign-key style filter: bind the id, not the raw value.
            $col =~ s/_id$//;
            push @bind, &id($col, $val);
        } else {
            push @bind, $val;
        }
    }
    $where .= "1";    # harmless terminator for the trailing AND
    $value = $::dbh->selectrow_array("SELECT $column FROM `$table` WHERE $where ORDER BY id DESC LIMIT 1", undef, @bind);
    return $value;
}
return 1;
__END__

View File

@@ -1,65 +0,0 @@
package access;
use strict;
use Data::Dumper;
# Decide whether $file falls inside an access rule's location section.
# Sub-sections are consulted in order file, directory, module; within
# each, the first regex list that matches decides: an "exclude_*"
# match returns 0, an "include_*" match returns 1.  No match returns 0.
sub allowed {
    my ($file, $section) = @_;
    my %verdict_of = ('exclude' => 0, 'include' => 1);
    for my $sub_section ("file", "directory", "module") {
        next unless defined $section->{$sub_section};
        for my $kind ("file", "directory") {
            for my $clude ("exclude", "include") {
                my $patterns = $section->{$sub_section}->{$clude . "_" . $kind};
                next unless defined $patterns;
                return $verdict_of{$clude} if &match_array($file, $patterns);
            }
        }
    }
    return 0;
}
# True (1) when $file matches any regex in @$patterns; 0 otherwise.
sub match_array {
    my ($file, $patterns) = @_;
    for my $pattern (@$patterns) {
        return 1 if $file =~ /$pattern/;
    }
    return 0;
}
# True when access rule $ah applies to this cvsroot/branch pair and
# its 'location' section admits $file; "#-all-#" is the wildcard for
# both cvsroot and branch.  Returns allowed()'s verdict, or 0 when the
# rule does not apply at all.
# Cleanup: removed the unused "my $return = 0;" lexical.
sub rule_applies {
    my ($ah, $cvsroot, $branch, $file) = @_;
    if (($cvsroot eq $ah->{'cvsroot'} || $ah->{'cvsroot'} eq "#-all-#") &&
        ($branch eq $ah->{'branch'} || $ah->{'branch'} eq "#-all-#")) {
        return &allowed($file, $ah->{'location'});
    }
    return 0;
}
#Example usage: next if &access::closed($accessconfig, $cvsroot, $to_branch, $directory, $file);
# True (1) when any access rule that applies to this
# cvsroot/branch/file has its 'close' flag set, i.e. checkins to the
# location are currently forbidden; 0 otherwise.  $file is prefixed
# with $directory when one is given.
sub closed {
    my ($accessconfig, $cvsroot, $branch, $directory, $file) = @_;
    $file = $directory . "/" . $file if $directory;
    for my $rule (@$accessconfig) {
        next unless &rule_applies($rule, $cvsroot, $branch, $file);
        return 1 if $rule->{'close'};
    }
    return 0;
}
return 1;

View File

@@ -1,45 +0,0 @@
package default;
use strict;
#
# addresses to which to complain
#
$default::admin_address = 'release-eng@bluemartini.com';
$default::pager_address = 'vajonez@yahoo.com';
#
# default times (seconds)
#
# how long to wait after a checkin to start mirroring
# this is mostly to give folks a time to nomirror
$default::mirror_delay = 15 * 60;
# let's be kind and not hammer the network/database.
# minimum time between checks of the database
$default::min_scan_time = 10;
# the old bonsai code uses a really inefficient means
# of getting checkin info into the database. each
# addcheckin.pl process consumes ~8MB of memory and
# take several seconds to run. The following number
# is the number of addcheckins that we'd like to see
# running at any one time. If the number of addcheckin.pl's
# exceeds the number below, wait throttle_time seconds
# and try again.
$default::max_addcheckins = 20;
$default::throttle_time = 5;
#
# Database stuff (pick the correct one!)
#
$default::db = "development";
$default::column = "value";
$default::key = "id";
%::db = (
"production" => {
"dsn" => "dbi:mysql:database=bonsai;host=bonsai2",
"user" => "bonsai_mh",
"pass" => "password",
},
"development" => {
"dsn" => "dbi:mysql:database=bonsai_dev;host=bonsai2",
"user" => "bonsai_dev_mh",
"pass" => "password",
},
);
return 1;

View File

@@ -1,732 +0,0 @@
#!/usr/local/bin/perl -w
#use Time::HiRes;
#$::start = Time::HiRes::time;
#use Cwd;
use strict;
use Sys::Hostname;
use Getopt::Long;
use File::Basename;
use File::Path;
use FindBin;
use MIME::Lite;
use Data::Dumper;
use lib $FindBin::Bin;
use config;
use proc;
use DB::Util;
use DB::Insert;
use DB::Update;
#
# Trap some signals and send mail to the interested parties
#
$SIG{HUP} = \&signal_handler;
$SIG{INT} = \&signal_handler;
$SIG{TERM} = \&signal_handler;
$SIG{QUIT} = \&signal_handler;
$SIG{SEGV} = \&signal_handler;
$SIG{__DIE__} = \&signal_handler;
# Common handler for fatal signals and __DIE__: mail the operators via
# proc::notify(), then re-raise.  Messages matching proc.pm's own
# "... failed at ...proc.pm line N." pattern are NOT mailed --
# presumably to avoid a notify failure re-triggering itself; confirm
# against proc.pm.
sub signal_handler {
my $msg = join "\n--\n", (@_, "mirror.pl is quitting now.\n");
unless ($_[0] =~ /^.* failed at .*proc.pm line \d{1,3}\.$/) {
&proc::notify("[CVS-mirror] FATAL ERROR", $msg);
}
die @_;
};
#sub {
# my $msg = join "\n--\n", (@_, "mirror.pl is quitting now.\n");
# unless ($_[0] =~ /^.* failed at .*proc.pm line \d{1,3}\.$/) {
# &proc::notify("[CVS-mirror] FATAL ERROR", $msg);
# }
# die @_;
#};
# Absolute paths to the external tools this script shells out to.
my $CVS = "/usr/local/bin/cvs";
my $DIFF = "/usr/local/bin/diff";
my $DIFF3 = "/usr/local/bin/diff3";
my $PATCH = "/usr/local/bin/patch";
# Raw command-line option values land here via GetOptions() below and are
# treated as read-only afterwards (munged copies go into local scalars).
my $h = {};
# Baseline options for proc::run(); run_and_log() copies these and lets a
# caller override entries per command (e.g. 'nomail' => 1 when noisy
# output is expected).  Exact key semantics live in proc.pm -- confirm
# there before changing.
my $paramref = {
'return' => 'hashref',
# 'noop' => 0,
# 'log_stdout' => 1,
'log_always' => 1,
'workdir' => 'tmp',
'keep_dir' => 1,
# 'nomail' => 1,
};
#
# Get the command line options. do not modify the values in the hash
# instead modify the local scalars
#
# '=s'/'=i' specifiers require a value; ':s' specifiers (offset,
# directory, log) accept an empty one -- presumably because top-level
# files have no directory and some checkins carry no log message
# (mirrord.pl always passes every option; confirm there).
#
GetOptions ($h,
'mirror_id=i',
'change_id=i',
'action=s',
'user=s',
'from_branch=s',
'from_cvsroot=s',
'to_branch=s',
'to_cvsroot=s',
'offset:s',
'directory:s',
'file=s',
'oldrev=s',
'newrev=s',
'log:s',
);
#
# I know this appears to be a gratuitous waste of memory, but I want to
# keep the original unmodified values in the $h hashref and the munged
# values in local scalars. I don't care if you don't like it and think
# that it's silly.
#
my $mirror_id = $h->{'mirror_id'};
my $change_id = $h->{'change_id'};
my $action = $h->{'action'};
my $user = $h->{'user'};
my $from_branch = $h->{'from_branch'};
my $from_cvsroot = $h->{'from_cvsroot'};
my $to_branch = $h->{'to_branch'};
my $to_cvsroot = $h->{'to_cvsroot'};
my $offset = $h->{'offset'};
my $directory = $h->{'directory'};
my $file = $h->{'file'};
my $oldrev = $h->{'oldrev'};
my $newrev = $h->{'newrev'};
my $log = $h->{'log'};
#
# Create aggregate variables and quotemeta things that need quoting
# (everything below is interpolated into shell command lines, so
# metacharacters must be neutralized).
# I'm quoting stuff (like mirror_id, rev numbers, and branch) that
# don't technically require it, just in case (however unlikely) CVS
# or bonsai change the way they operate.
#
$mirror_id = quotemeta($mirror_id);
$change_id = quotemeta($change_id);
$action = quotemeta($action);
$user = quotemeta($user);
$from_branch = quotemeta($from_branch);
$to_branch = quotemeta($to_branch);
$oldrev = quotemeta($oldrev);
$newrev = quotemeta($newrev);
#
# munge the directory/filename using the offset to tweak from/to.
# this allows for inter-repository and inter-module mirroring
# (be careful, inter-x mirroring is *NOT* well tested)
#
my $from_dir_file = $directory ? $directory . "/" . $file : $file;
my $to_dir_file = $from_dir_file;
# offset is "from|to"; an empty offset means no path rewriting at all.
$offset = "|" unless $offset;
# NOTE(review): with the default "|" both halves below come back undef,
# making the substitution a no-op but warning under -w -- confirm.
my ($from_offset, $to_offset) = split /\|/, $offset;
# remove \Q & \E below to allow from side regex matching; although, that is
# likely to open a pandora's box of problems for very little benefit.
# thj sez "don't do it"
$to_dir_file =~ s/\Q$from_offset\E/$to_offset/;
my $to_directory = dirname($to_dir_file);
my $to_file = basename($to_dir_file);
# "uq_" copies stay unquoted for -f tests, stderr comparisons, and mail
# bodies; the quotemeta'd twins are for shell command interpolation.
my $uq_to_directory = $to_directory;
$to_directory = quotemeta($to_directory);
$to_file = quotemeta($to_file);
my $uq_to_dir_file = $to_dir_file;
$to_dir_file = quotemeta($to_dir_file);
my $from_directory = quotemeta($directory);
my $from_file = quotemeta($file);
$from_dir_file = quotemeta($from_dir_file);
#
# Determine the mirror change type from which revisions are present:
#   oldrev + newrev -> "checkin" (modification)
#   newrev only     -> "add"
#   oldrev only     -> "remove"
# Neither revision means the database row was inserted corruptly: fatal.
# (Fixed a typo in the original die message: "Both and 'oldrev'".)
#
my $change_type;
if ($oldrev && $newrev) {
$change_type = "checkin";
} elsif (!$oldrev && $newrev) {
$change_type = "add";
} elsif ($oldrev && !$newrev) {
$change_type = "remove";
} else {
die "Both 'oldrev' and 'newrev' are undefined (mirror_id = $mirror_id, ".
"change_id = $change_id). This is bad. REAL BAD (trust me).\n\n" .
"If you are getting this error it means that the checkin/change got inserted into " .
"the database in an extremely bad way. Please to be fixing.\n";
}
#
# munge the log message to indicate this is a mirrored checkin of change_type $change_type
#
$log .= " (mirrored $change_type from $from_branch)";
$log = quotemeta($log);
#
# get the host name and fix cvsroots for local and remote access
# TODO: the remote access parts will require a read-only user on the
# remote repository and also a modified from_cvsroot that includes
# a connection method and user.
#
# TODO: exit if to_cvsroot != from_cvsroot. do so until I get adds working
#
my $hostname = Sys::Hostname::hostname();
$to_cvsroot =~ s/^$hostname://; # this should always match
$from_cvsroot =~ s/^$hostname://; # this should only sometimes match
#
# The old bonsai rlog code reads $ENV{'CVSROOT'} rather than taking the
# root as an argument, so it has to be set here.
#
$ENV{'CVSROOT'} = $to_cvsroot;
# Unquoted copy for stderr comparisons/mail; quoted copies for commands.
my $uq_to_cvsroot = $to_cvsroot;
$to_cvsroot = quotemeta($to_cvsroot);
$from_cvsroot = quotemeta($from_cvsroot);
#
# if we are mirroring to/from the TRUNK branch (TRUNK)
# do not include a -r option on the command line
# (from_branch_arg is probably wasted since we have rev numbers
# and should therefore never need it, but i like symmetry).
#
my $to_branch_arg = ($to_branch && $to_branch ne "TRUNK") ? "-r $to_branch" : "" ;
my $from_branch_arg = ($from_branch && $from_branch ne "TRUNK") ? "-r $from_branch" : "" ;
#
# Determine the appropriate merge type (cvs or diff3): plain cvs merging
# only works when source and destination share a repository and module;
# any offset or differing cvsroot forces the hand-rolled diff3 path.
#
my $merge_type;
if ($offset ne "|" || $from_cvsroot ne $to_cvsroot) {
$merge_type = 'diff3';
} else {
$merge_type = 'cvs';
}
#
# Main flow: try a normal merge; on conflict retry with patch(1); if that
# also conflicts, force the checkin (conflict markers and all) so the
# owner is notified and can resolve by hand.  The helpers called below
# may terminate the script themselves via update_status().
#
my $status;
$status = &mirror($merge_type, $change_type);
$status = &diff_patch if $status eq 'conflict';
$status = &mirror($merge_type, $change_type, 1) if $status eq 'conflict';
&error_detected if $status eq "error";
# A clean merge is recorded as 'cvs_merge' or 'diff3_merge'.
$status = $status eq "merge" ? $merge_type."_".$status : $status;
&update_status($status);
#
# Subroutines
#
#
# Mirror one change onto the destination branch/repository.
#
# Args:
#   $merge_type  - 'cvs'   (same repository/module: let "cvs co -j -j" merge)
#                  'diff3' (cross-module/repository: merge by hand)
#   $change_type - 'checkin', 'add', or 'remove'
#   $force_ci    - true on the second attempt: commit with "cvs ci -f"
#                  even though the file still carries conflict markers
#
# Returns 'merge', 'conflict', 'non_merge_overwrite', or 'error'.  May
# instead terminate the script via missing_file()/previously_*(), which
# exit through update_status().
#
sub mirror {
my ($merge_type, $change_type, $force_ci) = @_;
# ($cmd is declared but never used below.)
my ($cmd, $r, $status) = undef;
$force_ci = $force_ci ? '-f' : '' ;
$status = 'merge';
if ($merge_type eq 'cvs') {
# clean out any leftover working file from a previous attempt
unlink "tmp/$uq_to_dir_file" if (-f "tmp/$uq_to_dir_file");
# checkout with -j old -j new: cvs applies the delta for us
$r = &run_and_log("$CVS -d $to_cvsroot co $to_branch_arg -j $oldrev -j $newrev $to_dir_file");
&missing_file if (
$change_type eq 'checkin' &&
!-f "tmp/$uq_to_dir_file"
);
# for adds: if the file already exists, report whether it matches
if ($change_type eq 'add' && defined $r->{'stderr'} &&
$r->{'stderr'} eq "cvs checkout: file $uq_to_dir_file exists, but has been added in revision $h->{'newrev'}\n") {
my $diff_to_branch = $to_branch eq "TRUNK" ? "HEAD" : $to_branch;
$r = &run_and_log("$CVS rdiff -r $newrev -r $diff_to_branch $to_dir_file", {'nomail' => 1});
&previously_added($r->{'stdout'} ? 'different' : 'same');
}
&previously_removed if (
$change_type eq 'remove' &&
defined $r->{'stderr'} &&
$r->{'stderr'} ne "cvs checkout: scheduling $uq_to_dir_file for removal\n"
);
$status = 'conflict' if (
$change_type eq 'checkin' &&
defined $r->{'stderr'} &&
$r->{'stderr'} eq "rcsmerge: warning: conflicts during merge\n"
);
# binary (nonmergeable) file: only flag an overwrite if the
# destination actually differed from the old source revision
if ( $change_type eq 'checkin' && defined $r->{'stderr'} &&
$r->{'stderr'} =~ /\Qcvs checkout: nonmergeable file needs merge\E/) {
my $diff_to_branch = $to_branch eq "TRUNK" ? "HEAD" : $to_branch;
my $nmfd = &run_and_log("$CVS rdiff -r $oldrev -r $diff_to_branch $to_dir_file", {'nomail' => 1});
$status = 'non_merge_overwrite' if $nmfd->{'stdout'};
}
if ($status eq 'merge' || $status eq 'non_merge_overwrite' || $force_ci) {
$r = &run_and_log("$CVS ci $force_ci -m $log $to_dir_file");
&conflicted if ($status eq 'conflict');
&non_merge if ($status eq 'non_merge_overwrite');
# totally silent checkin means there was nothing to commit
&previously_applied unless ($r->{'stdout'} || $r->{'stderr'} || $r->{'exit_value'});
# the regex ignores expected "waiting for/obtained lock" chatter
$status = 'error' if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} !~ /^(\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] waiting for \E.*?\Q's lock in $uq_to_cvsroot\/$uq_to_directory\E\n)+\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] obtained lock in $uq_to_cvsroot\/$uq_to_directory\E\n$/
)
);
}
return $status;
} elsif ($merge_type eq 'diff3') {
#
# use diff3 (like cvs does internally) to mirror between modules and repositories.
# since cvs can't do the magic for us, we need to have separate actions for change, add, and remove.
#
# cleanup any cruft that might be left over from the previous attempt (prior to the forced checkin of the conflict)
unlink "tmp/$uq_to_dir_file" if (-f "tmp/$uq_to_dir_file");
# check keyword expansion mode of source file
my ($keywordmode, $option) = undef;
$r = &run_and_log("$CVS -d $from_cvsroot rlog -hN $from_dir_file | grep '^keyword substitution: '");
chomp($keywordmode = $r->{'stdout'});
$keywordmode =~ s/^^keyword substitution: //;
if ($change_type eq 'checkin') {
# for changes to existing files use diff3 to merge
# get the old revision
$r = &run_and_log("$CVS -q -d $from_cvsroot co -p -r $oldrev $from_dir_file > $from_file,$oldrev");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
# get the new revision
$r = &run_and_log("$CVS -q -d $from_cvsroot co -p -r $newrev $from_dir_file > $from_file,$newrev");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
# get the version from the destination module/branch. If it is not there send a missing_file warning
$r = &run_and_log(
"$CVS -d $to_cvsroot co $to_branch_arg $to_dir_file",
{'nomail' => 1}
);
&missing_file if (!-f "tmp/$uq_to_dir_file");
# if binary compare dest. with old and change status to "non_merge" and checkin the new source file
# if not, don't change the status (and thus send the mail), just checkin the new file
if ($keywordmode eq 'b') {
$r = &run_and_log("$DIFF -q $from_file,$oldrev $to_dir_file");
$status = "non_merge_overwrite" if $r->{'stdout'};
# NOTE(review): this copies the OLD source revision over the
# destination -- looks like it should be $newrev; confirm.
$r = &run_and_log("cp $from_file,$oldrev $to_dir_file");
# there should really be an error check here
} else {
# behold the magic that is diff3! (store result in foo,new)
$r = &run_and_log(
"$DIFF3 -E -am $to_dir_file $from_file,$oldrev $from_file,$newrev > $to_dir_file,new",
{'nomail' => 1}
);
# diff3 exits 1 on conflicts, 2 on trouble
$status = 'error' if ($r->{'exit_value'} == 2);
$status = 'conflict' if ($r->{'exit_value'} == 1);
# replace with the new file in prep for checkin
$r = &run_and_log("mv $to_dir_file,new $to_dir_file");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
}
# if we haven't conflicted yet, or we are conflicted and diff_patch couldn't handle it, it's time to checkin
if ($status eq 'merge' || $status eq 'non_merge_overwrite' || $force_ci) {
$r = &run_and_log("$CVS ci -m $log $to_dir_file");
# send bitch mail for conflicts
&conflicted if ($status eq 'conflict');
# send mail about binary changes where the files are different
&non_merge if ($status eq 'non_merge_overwrite');
# send more naggy mail if the checkin is a noop
&previously_applied unless ($r->{'stdout'} || $r->{'stderr'} || $r->{'exit_value'});
# set the status to "error" if we get a non-zero exit value or something unexpected on stderr
# (the ugly regex is to ignore lock bump messages)
$status = 'error' if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} !~ /^(\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] waiting for \E.*?\Q's lock in $uq_to_cvsroot\/$uq_to_directory\E\n)+\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] obtained lock in $uq_to_cvsroot\/$uq_to_directory\E\n$/
)
);
}
} elsif ($change_type eq 'add') {
# TODO: Needs mucho error handling and binary foo
# for added files, bootstrap/spoof some CVS admin directories and add the file
# check to see if the file is already here and send the appropriate bitch mail
$r = &run_and_log(
"$CVS -d $to_cvsroot co -d prev $to_branch_arg $to_dir_file",
{'nomail' => 1}
);
if (-f "tmp/$uq_to_dir_file") {
$r = &run_and_log("$CVS -q -d $from_cvsroot co -p -r $newrev $from_dir_file > $from_file,$newrev");
$r = &run_and_log("$DIFF -wB $from_file,$newrev prev/$to_file", {'nomail' => 1});
&previously_added($r->{'stdout'} ? 'different' : 'same');
}
#$r = &run_and_log("$CVS -d $from_cvsroot rlog -hN $from_dir_file | grep '^keyword substitution: '");
#my $option = $r->{'stdout'};
#$option =~ s/^^keyword substitution: /-/;
# create the directory structure for the new file
$r = &run_and_log("mkdir -p $to_directory");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
# get the new file and stash it in its freshly created directory
$r = &run_and_log("$CVS -q -d $from_cvsroot co -p -r $newrev $from_dir_file > $to_dir_file");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
# spoof an existing checkout by populating a CVS admin directory
$r = &run_and_log("mkdir CVS");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
$r = &run_and_log("echo $to_cvsroot > CVS/Root");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
$r = &run_and_log("echo . > CVS/Repository");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
$r = &run_and_log("echo D > CVS/Entries");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
# you only need CVS/Tag if not on the trunk (also need to prefix with a "T")
if ($to_branch ne 'TRUNK') {
$r = &run_and_log("echo T$to_branch > CVS/Tag");
$status = 'error' if (defined $r->{'stderr'} || $r->{'exit_value'});
}
# recursively add the subdirs (as described in the `info cvs`) to create the appropriate admin dirs
my $add = undef;
for my $element (split("/", $uq_to_dir_file)) {
$add .= quotemeta($element);
# don't forget to set the keyword expansion the same as the source file
$option = ($add eq $to_dir_file) ? "-k" . $keywordmode : "";
# add the dir/file
$r = &run_and_log("$CVS add $option $add");
# ignore some expected stderr output
$status = 'error' if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} ne
"cvs add: scheduling file `$uq_to_dir_file' for addition\n" .
"cvs add: use 'cvs commit' to add this file permanently\n" &&
$r->{'stderr'} !~
/^\Qcvs add: re-adding file $uq_to_dir_file (in place of dead revision \E[0-9\.]+?\)\n\Qcvs add: use 'cvs commit' to add this file permanently\E\n$/
)
);
$add .= quotemeta("/");
}
# checkin the new file
$r = &run_and_log("$CVS ci -m $log $to_dir_file");
# again ignore some expected error with big freaky regexes
$status = 'error' if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} ne
"cvs commit: changing keyword expansion mode to $option\n" &&
$r->{'stderr'} !~
/^((\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] waiting for \E.*?\Q's lock in $uq_to_cvsroot\/$uq_to_directory\E\n)+\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] obtained lock in $uq_to_cvsroot\/$uq_to_directory\E\n)?(\Qcvs commit: changing keyword expansion mode to $option\E\n)?$/
)
);
} elsif ($change_type eq 'remove') {
# removes are easy, first check to see if the file is even there, and send a message if not
$r = &run_and_log(
"$CVS -d $to_cvsroot co $to_branch_arg $to_dir_file",
{'nomail' => 1}
);
&previously_removed if (!-f "tmp/$uq_to_dir_file");
# remove the file (ignoring expected stderr output)
$r = &run_and_log("$CVS rm -f $to_dir_file");
$status = 'error' if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} ne
"cvs remove: scheduling `$uq_to_dir_file' for removal\n" .
"cvs remove: use 'cvs commit' to remove this file permanently\n"
)
);
# and check it in (ignoring expected stderr output)
$r = &run_and_log("$CVS ci -m $log $to_dir_file");
$status = 'error' if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} !~ /^(\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] waiting for \E.*?\Q's lock in $uq_to_cvsroot\/$uq_to_directory\E\n)+\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] obtained lock in $uq_to_cvsroot\/$uq_to_directory\E\n$/
)
);
} else {
die "Undefined change type ($change_type), Loser.\n\n";
}
} else {
die "Undefined merge type ($merge_type) specified.\n\n" .
"Your coding is weak and ineffectual.\n\n";
}
return $status;
}
#
# Fallback merge used after mirror() reports a conflict: apply the
# source delta as a context diff with patch(1), which sometimes succeeds
# where rcsmerge conflicts.  Returns 'diff_patch' on a committed success,
# 'conflict' if patch also fails (the caller then forces the checkin),
# or 'error'.
#
sub diff_patch {
my ($r) = undef;
# start from a clean checkout of the destination file
unlink "tmp/$uq_to_dir_file" if (-f "tmp/$uq_to_dir_file");
$r = &run_and_log("$CVS -d $to_cvsroot co $to_branch_arg $to_dir_file");
# pipe the source rdiff straight into patch (-N: skip reversed, -l: loose whitespace)
$r = &run_and_log(
"$CVS -d $from_cvsroot rdiff -c -r $oldrev -r $newrev $from_dir_file | $PATCH -c -N -l $to_dir_file",
{'nomail' => 1}
);
if ($r->{'exit_value'} && defined $r->{'stdout'}) {
if ($r->{'stdout'} =~ /\d+ out of \d+ hunk[s]? FAILED -- saving rejects to( file)? $to_dir_file\.rej/) {
return 'conflict';
} elsif ($r->{'stdout'} =~ /\QReversed (or previously applied) patch detected! Skipping patch.\E/) {
#
# patch lies and says the patch is reversed or previously applied when it is not.
# use mirror_id = 3247 & change_id = 31329 with GNU patch version 2.5.4 as a test case/example.
# Since we can't trust patch, return 'conflict' and force the checkin.
# &previously_applied;
return 'conflict';
}
return 'error'
}
$r = &run_and_log("$CVS ci -m $log $to_dir_file");
# error unless stderr is only the expected lock-wait chatter
if (
$r->{'exit_value'} ||
(defined $r->{'stderr'} &&
$r->{'stderr'} !~ /^(\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] waiting for \E.*?\Q's lock in $uq_to_cvsroot\/$uq_to_directory\E\n)+\Qcvs commit: [\E([0-9]{2}:){2}[0-9]{2}\Q] obtained lock in $uq_to_cvsroot\/$uq_to_directory\E\n$/
)
) {
return 'error';
} else {
return 'diff_patch';
}
#return (defined $r->{'stderr'} || $r->{'exit_value'}) ? 'error' : 'diff_patch';
}
#
# Run an external command via proc::run(), recording the resulting log
# entry against this mirror/change pair in the database.  Per-call
# options in $overrides take precedence over the script-wide $paramref.
#
sub run_and_log {
    my ($cmd, $overrides) = @_;
    # Layer the caller's overrides on top of a copy of the defaults.
    my %opts = %$paramref;
    if ($overrides) {
        %opts = (%opts, %$overrides);
    }
    &DB::Util::connect();
    my $result = &proc::run($cmd, \%opts);
    # Tie the command's log record back to this mirror-change, if one was made.
    if (defined $result->{'log_id'}) {
        &DB::Insert::mirror_change_exec_map($mirror_id, $change_id, $result->{'log_id'});
    }
    &DB::Util::disconnect();
    return $result;
}
#
# Send a message (and update the mirror_change status) if we try to mirror
# and a nonfatal error is detected.  Never returns: update_status() exits.
# (Fixed the garbled sentence in the mail body.)
#
sub error_detected {
my ($subject, $body, $from_root, $to_root);
# Only spell out the repositories when mirroring crosses cvsroots.
if ($h->{'to_cvsroot'} eq $h->{'from_cvsroot'}) {
$to_root = "";
$from_root = "";
} else {
$to_root = " ($h->{'to_cvsroot'})";
$from_root = " ($h->{'from_cvsroot'})";
}
$subject = "error - $user - $to_branch - $uq_to_dir_file";
$body = qq#
Your checkin to $h->{'directory'}/$h->{'file'} on the $h->{'from_branch'}$from_root could not be mirrored to the $h->{'to_branch'}$to_root because an unexpected error was detected while mirroring $uq_to_dir_file.
Whatever is broken (or not quite right) will likely need to be fixed by Release Engineering. This message is just to inform you that the mirror operation did not complete successfully. If you have any questions, please contact Release Engineering.
Release Engineering: Use the info below to look up the details of the error in the database.
#;
&mail($subject, $body);
&update_status("error");
}
#
# Send a message (and update the mirror_change status) when the file to
# be mirrored does not exist on the destination branch.  Never returns:
# update_status() exits the script.
#
sub missing_file {
    # Repository suffixes are shown only for cross-cvsroot mirrors.
    my $same_root = $h->{'to_cvsroot'} eq $h->{'from_cvsroot'};
    my $to_root   = $same_root ? "" : " ($h->{'to_cvsroot'})";
    my $from_root = $same_root ? "" : " ($h->{'from_cvsroot'})";
    my $subject = "missing file - $user - $to_branch - $uq_to_dir_file";
    my $body = qq#
Your checkin to $h->{'directory'}/$h->{'file'} on the $h->{'from_branch'}$from_root could not be mirrored to the $h->{'to_branch'}$to_root because $uq_to_dir_file was not found on the $h->{'to_branch'}$to_root.
This could be caused by any of a number of things, such as mirroring being misconfigured, the file might have originally been added to the $h->{'from_branch'} by a tagging operation instead of a \"cvs add\", or maybe it's been removed from the $h->{'to_branch'}$to_root.
If $uq_to_dir_file needs to be on the $h->{'to_branch'}$to_root either add it or contact Release Engineering.
#;
    &mail($subject, $body);
    &update_status("missing");
}
#
# Send mail when the mirror was to add a file that is already present on
# the destination.  $diff is 'same' or 'different' (destination vs. the
# newly added source revision).  Never returns: update_status() exits.
#
sub previously_added {
    my $diff = shift;
    # Repository suffixes are shown only for cross-cvsroot mirrors.
    my $same_root = $h->{'to_cvsroot'} eq $h->{'from_cvsroot'};
    my $to_root   = $same_root ? "" : " ($h->{'to_cvsroot'})";
    my $from_root = $same_root ? "" : " ($h->{'from_cvsroot'})";
    # "are the same" reads better than "are same".
    my $foo = $diff eq "same" ? "the " : "";
    my $subject = "previously added ($diff) - $user - $to_branch - $uq_to_dir_file";
    my $body = qq#
Your add of $h->{'directory'}/$h->{'file'} to the $h->{'from_branch'}$from_root could not be mirrored to the $h->{'to_branch'}$to_root because $uq_to_dir_file was already present. The file you added and $uq_to_dir_file on the $h->{'to_branch'}$to_root are $foo$diff.
This may (or may not) indicate a problem. Contact Release Engineering if you have any questions.
#;
    &mail($subject, $body);
    &update_status("prev_add_$diff");
}
#
# Send mail when the mirror was to remove a file that is already absent
# from the destination branch.  Never returns: update_status() exits.
#
sub previously_removed {
    # Repository suffixes are shown only for cross-cvsroot mirrors.
    my $same_root = $h->{'to_cvsroot'} eq $h->{'from_cvsroot'};
    my $to_root   = $same_root ? "" : " ($h->{'to_cvsroot'})";
    my $from_root = $same_root ? "" : " ($h->{'from_cvsroot'})";
    my $subject = "previously removed - $user - $to_branch - $uq_to_dir_file";
    my $body = qq#
Your remove of $h->{'directory'}/$h->{'file'} from the $h->{'from_branch'}$from_root could not be mirrored to the $h->{'to_branch'}$to_root because $uq_to_dir_file does not exist on the $h->{'to_branch'}$to_root. This file was either previously removed, or never existed on the $h->{'to_branch'}$to_root.
This may (or may not) indicate a problem. Contact Release Engineering if you have any questions.
#;
    &mail($subject, $body);
    &update_status("prev_rm");
}
#
# Send mail when the mirror checkin turns out to be a no-op (the change
# is already present on the destination).  Never returns: update_status()
# exits the script.
#
sub previously_applied {
    # Repository suffixes are shown only for cross-cvsroot mirrors.
    my $same_root = $h->{'to_cvsroot'} eq $h->{'from_cvsroot'};
    my $to_root   = $same_root ? "" : " ($h->{'to_cvsroot'})";
    my $from_root = $same_root ? "" : " ($h->{'from_cvsroot'})";
    my $subject = "previously applied - $user - $to_branch - $uq_to_dir_file";
    my $body = qq#
Your checkin to $h->{'directory'}/$h->{'file'} on the $h->{'from_branch'}$from_root was not mirrored to $uq_to_dir_file on the $h->{'to_branch'}$to_root because that change appears to have already been applied.
This may (or may not) indicate a problem. Contact Release Engineering if you have any questions.
#;
    &mail($subject, $body);
    &update_status("noop");
}
#
# Send mail if the mirror involves a nonmergeable (binary) file and the
# source and destination differed before the original checkin, since we
# are overwriting the destination file.  Never returns: update_status()
# exits.  (Fixed missing "be" in the mail body.)
#
sub non_merge {
my ($subject, $body, $from_root, $to_root);
# Only spell out the repositories when mirroring crosses cvsroots.
if ($h->{'to_cvsroot'} eq $h->{'from_cvsroot'}) {
$to_root = "";
$from_root = "";
} else {
$to_root = " ($h->{'to_cvsroot'})";
$from_root = " ($h->{'from_cvsroot'})";
}
$subject = "nonmergeable file - $user - $to_branch - $uq_to_dir_file";
$body = qq#
Your checkin to nonmergable file: $h->{'directory'}/$h->{'file'} on the $h->{'from_branch'}$from_root has mirrored to the $h->{'to_branch'}$to_root overwriting the file $uq_to_dir_file. Before your checkin these two files were DIFFERENT; however, now they are the same.
This may (or may not) be desirable. Contact Release Engineering if you have any questions.
#;
&mail($subject, $body);
&update_status("non_merge_overwrite");
}
#
# Send email if conflicts were generated during the mirror, including an
# excerpt of every <<<<<<< ... >>>>>>> hunk from the merged working file.
# Never returns: update_status() exits.
# Fixes: 3-arg open with a lexical handle (guarded, so a missing working
# file just yields an empty detail section instead of -w warnings), and
# $count/$conflict initialized to silence uninitialized-value warnings.
#
sub conflicted {
my ($subject, $body, $to_root, $from_root);
# Only spell out the repositories when mirroring crosses cvsroots.
if ($h->{'to_cvsroot'} eq $h->{'from_cvsroot'}) {
$to_root = "";
$from_root = "";
} else {
$to_root = " ($h->{'to_cvsroot'})";
$from_root = " ($h->{'from_cvsroot'})";
}
my $count = 0;      # current line number in the merged file
my $conflict = 0;   # conflict-marker nesting depth
my $conflict_text = "=" x 70 . "\nConflict Detail:\n";
if (open my $conflict_fh, '<', "tmp/$uq_to_dir_file") {
while (<$conflict_fh>) {
$count++;
if (/^<<<<<<< /) {
$conflict++;
# note where each top-level conflict hunk starts
if ($conflict == 1) {
$conflict_text .= "\nAt line $count:\n";
}
}
if ($conflict) {
$conflict_text .= $_;
}
if (/^>>>>>>> /) {
if ($conflict == 1) {
$conflict_text .= "\n";
}
$conflict--;
}
}
close ($conflict_fh);
}
$conflict_text .= "=" x 70 . "\n";
$subject = "CONFLICT - $user - $to_branch - $uq_to_dir_file";
$body = qq#
Your checkin to $h->{'directory'}/$h->{'file'} on the $h->{'from_branch'}$from_root has mirrored with conflicts (shown below) and the $h->{'to_branch'}$to_root is now broken.
THIS REQUIRES YOUR IMMEDIATE ATTENTION.
You can checkout the conflicted file with the following command:
cvs co $to_branch_arg $uq_to_dir_file
If you have any questions please contact Release Engineering.
#;
&mail($subject, "$body\n$conflict_text");
&update_status("conflict");
}
#
# Record the final disposition of this mirror-change in the database and
# terminate the script.  Every code path in this program ends here.
#
sub update_status {
    my $status = shift;
    #-debug-#print Dumper($status);
    &DB::Util::connect();
    &DB::Update::mirror_change($mirror_id, $change_id, $status);
    &DB::Util::disconnect();
    exit 0;
}
#
# Convenience wrapper around MIME::Lite.  Appends a dump of all the
# command-line options/values to the message body (handy for debugging),
# then mails the committer with the pager address Bcc'd.
#
sub mail {
    my ($subject, $text) = @_;
    $Data::Dumper::Indent = 1;
    $Data::Dumper::Terse = 1;
    my $footer = join '',
        "\n--\n<jedi_mind_trick>\n",
        "This is not the information you're looking for.\n",
        Dumper($h),
        "</jedi_mind_trick>";
    $text .= $footer;
    MIME::Lite->new(
        From => "$default::admin_address",
        To => "$user\@bluemartini.com",
        # Cc => "$default::admin_address",
        Bcc => "$default::pager_address",
        Subject => "[CVS-mirror] $subject",
        Datestamp => 0,
        Data => "$text",
    )->send();
}
#
# Cleanup after ourselves since the calling script is running as the
# unprivileged mirror user.
#
# END runs on every exit path -- including the exit 0 in update_status()
# and die() -- so the scratch checkout tree is always removed.
END { rmtree $paramref->{'workdir'} };
__END__

View File

@@ -1,29 +0,0 @@
#!/usr/local/bin/perl -w
#
# Refresh the runtime tuning parameters for this host: look up the host
# id, pull the per-host runtime row from the database, and fall back to
# the compile-time defaults from config.pm for anything the database
# does not supply.  Results are left in package globals ($::mirror_delay
# et al.) for the surrounding tooling to read.
# (Fix: the original fetched the same runtime row twice back to back;
# the redundant database query is dropped.)
#
use Time::HiRes qw(time);
use Data::Dumper;
use strict;
use Sys::Hostname;
use FindBin;
use lib $FindBin::RealBin;
use config;
use DB::Util;
use DB::Select;
use DB::Update;
use DB::Insert;
my $runtime;
&DB::Util::connect();
$main::host_id = &DB::Util::id("mh_hostname", Sys::Hostname::hostname());
$runtime = &DB::Select::runtime($main::host_id);
# A false/absent database value means "use the default from config.pm".
unless ($::mirror_delay = $runtime->{'mirror_delay'} ) { $::mirror_delay = $default::mirror_delay };
unless ($::min_scan_time = $runtime->{'min_scan_time'} ) { $::min_scan_time = $default::min_scan_time };
unless ($::throttle_time = $runtime->{'throttle_time'} ) { $::throttle_time = $default::throttle_time };
unless ($::max_addcheckins = $runtime->{'max_addcheckins'}) { $::max_addcheckins = $default::max_addcheckins };
&DB::Util::disconnect();
__END__

View File

@@ -1,291 +0,0 @@
#!/usr/local/bin/perl -w
#
# mirrord.pl -- polling daemon that turns pending mirror rows in the
# database into mirror.pl invocations (run via sudo as the original
# committer; see the main loop below).
#
use Time::HiRes qw(time);
use Data::Dumper;
use strict;
use Sys::Hostname;
use FindBin;
use lib $FindBin::Bin;
use config;
use proc;
use access;
use DB::Util;
use DB::Select;
use DB::Update;
use DB::Insert;
# World-readable/writable files: mirror.pl later runs as other users via
# sudo and must be able to use what this daemon creates -- presumably;
# confirm before tightening.
umask 0;
#
# Need /usr/local/bin in the path since sometimes
# diff3 needs to find the gnu version of diff
#
$ENV{'PATH'}='/usr/local/bin:/usr/bin';
#
# overload the die function to send me email when things go bad
# don't send mail if died in proc.pm since it does its own error
# catching and email bitching.
#
# TODO: maybe put this in the main loop and get a list of people from
# the database to send bitch mail to
#
#
# Trap some signals and send mail to the interested parties
#
$SIG{HUP} = \&signal_handler;
$SIG{INT} = \&signal_handler;
$SIG{TERM} = \&signal_handler;
$SIG{QUIT} = \&signal_handler;
$SIG{SEGV} = \&signal_handler;
$SIG{__DIE__} = \&signal_handler;
# Notify the interested parties (via proc::notify) and re-raise.
# Failures originating inside proc.pm are skipped because proc.pm sends
# its own error mail (see the comment above); notifying here again would
# duplicate it.
sub signal_handler {
my $msg = join "\n--\n", (@_, "mirrord.pl is quitting now.\n");
unless ($_[0] =~ /^.* failed at .*proc.pm line \d{1,3}\.$/) {
&proc::notify("[CVS-mirror] FATAL ERROR", $msg);
}
die @_;
};
#$SIG{__DIE__} = sub {
# my $msg = join "\n--\n", (@_, "mirrord.pl is quitting now.\n");
# unless ($_[0] =~ /^.* failed at .*proc.pm line \d{1,3}\.$/) {
# &proc::notify("[CVS-mirror] FATAL ERROR", $msg);
# }
# die @_;
#};
# Per-run caches of checkin/change rows keyed by id, so a checkin that
# fans out to several mirror targets is only fetched from the database
# once (see the main loop below).
my $checkin = {};
my $change = {};
my $runtime;
# (unused in the visible portion of this file)
my $totalops;
# Worker script spawned (via sudo) for each pending mirror-change.
my $mirror_cmd = $FindBin::Bin . "/mirror.pl";
# Options handed to proc::run() when spawning the worker; exact key
# semantics live in proc.pm.
my $paramref = {
'tmpdir' => '/tmp/mirror',
'return' => 'hashref',
# 'nomail' => 1,
'noop' => 0,
# 'log_stdout' => 1,
};
# Resolve this host's id once; the main loop reconnects each iteration.
&DB::Util::connect();
$main::host_id = &DB::Util::id("mh_hostname", Sys::Hostname::hostname());
&DB::Util::disconnect();
#
# Main loop
#
while (1) {
my $loopstart = time;
my $lastcommand = defined $runtime->{'command'} ? $runtime->{'command'} : "";
&DB::Util::connect();
#
# fetch some operating parameters from the database
#
$runtime = &DB::Select::runtime($main::host_id);
unless ($::mirror_delay = $runtime->{'mirror_delay'} ) { $::mirror_delay = $default::mirror_delay };
unless ($::min_scan_time = $runtime->{'min_scan_time'} ) { $::min_scan_time = $default::min_scan_time };
unless ($::throttle_time = $runtime->{'throttle_time'} ) { $::throttle_time = $default::throttle_time };
unless ($::max_addcheckins = $runtime->{'max_addcheckins'}) { $::max_addcheckins = $default::max_addcheckins };
#
# Send some mail when the command changes
#
if (defined $runtime->{'command'} && $runtime->{'command'} ne $lastcommand ) {
&proc::notify("[CVS-mirror] $runtime->{'command'}", "");
};
#
# log an acknowledgement in the database and shutdown if the 'command' parameter = exit
#
if (defined $runtime->{'command'} && $runtime->{'command'} =~ m/exit/i ) {
&DB::Update::runtime($runtime->{'response'}, $runtime->{'id'}) if defined $runtime->{'id'};
last;
};
#
# if 'command' != pause, acknowledge and start gathering mirror information
#
unless (defined $runtime->{'command'} && $runtime->{'command'} =~ m/pause/i) {
&DB::Update::runtime($runtime->{'response'}, $runtime->{'id'}) if defined $runtime->{'id'};
#print "running...\nlast_update = $runtime->{'last_update'}\n";
#
# Get a copy of the accessconfig from the database. (used later to prevent attempting
# to mirror to a branch/module/directory/file that is closed)
#
my $accessconfig = eval &DB::Util::retrieve("expanded_accessconfig");
#
# get a list(ref) of mirrors currently labelled as 'pending'
#
my $mirrors = &DB::Select::mirrors('pending');
#
# loop over the mirror list
#
for my $m (@$mirrors) {
#
# extract data from mirror reference and store it in convenience variables
#
my $mid = $m->[0];
my $cid = $m->[1];
my $to_branch = $m->[2];
my $to_cvsroot = $m->[3];
my $offset = $m->[4];
#
# gather information about the checkin that produced this mirror object
# and cache it since it will likely be used by another mirror object in the list
#
$checkin->{$cid} = &DB::Select::checkin($cid) unless defined $checkin->{$cid};
#
# store the checkin info in convenience variables
#
my $directory = $checkin->{$cid}->{'directory'};
my $user = $checkin->{$cid}->{'user'};
my $log = $checkin->{$cid}->{'log'};
my $from_cvsroot = $checkin->{$cid}->{'cvsroot'};
#
# gather a list of changes from the source checkin that apply to this mirror
#
my $mirror_changes = &DB::Select::mirror_changes("pending", $mid);
#
# loop over the changes for this mirror object
#
for my $mc (@$mirror_changes) {
#
# extract information about the mirror-change, and store in blah blah blah...
#
my $chid = $mc->[0];
my $action = $mc->[1];
#
# gather info about this particular change and cache it...
#
$change->{$chid} = &DB::Select::change($chid) unless defined $change->{$chid};
#
# extract into convenience variables
#
my $file = $change->{$chid}->{'file'};
my $oldrev = $change->{$chid}->{'oldrev'};
my $newrev = $change->{$chid}->{'newrev'};
my $from_branch = $change->{$chid}->{'branch'};
#
# Check to see if to_branch has been EOL'd, if so, update the mirror_change status
# send a friendly reminder to the cvs administrator that he/she sucks. (Oh, and don't
# mirror this change).
#
if (@{&DB::Select::branch_eol($to_cvsroot, $to_branch)}) {
print "EOL BRANCH = ", @{&DB::Select::branch_eol($to_cvsroot, $to_branch)}, "\n";
&DB::Update::mirror_change($mid, $chid, 'to_branch_eol');
&proc::notify("[CVS-mirror] warning - to_branch_eol",
"An attempt was made to mirror $directory/$file from $from_cvsroot:$from_branch " .
"to $to_cvsroot:$to_branch (oldrev = $oldrev, newrev = $newrev, offset = \"$offset\")." .
"\n\nYou've likely EOL'd a branch and forgot to update your mirror rules. " .
"\n\n\tcheckin_id = $cid\n\tchange_id = $chid\n\tmirror_id = $mid" .
"\n\taction = $action\n\tuser = $user\n\tlog message = $log\n\n" .
"\n-- Your loving and ever-present MirrorHandler"
);
next;
print "--> branch eol\n";
}
#
# Check to see if the repository is open before proceeding (blessed users are NOT mirrored)
#
#my $pre = time;
next if &access::closed($accessconfig, $to_cvsroot, $to_branch, $directory, $file);
#my $post = time;
#print "--> $to_cvsroot, $to_branch, $directory, $file -- ", $post - $pre, "\n";
print "--> $to_cvsroot, $to_branch, $directory, $file\n";
#
# if we are using the old bonsai on this machine, lets limit the number of
# addcheckin.pl processes that are spawned, since each addcheckin.pl uses about
# 8MB of RAM.
#
print "--> addcheckin.pl count: ", &proc::addcheckin_count(), "\n";
# Busy-wait until the addcheckin.pl process count drops below the cap.
sleep $::throttle_time while (&proc::addcheckin_count() > $::max_addcheckins);
#
# TODO: Check that mirror is still valid before proceeding. There is the possibility that the
# mirror rules may have changed between the time of the source checkin and the execution of the
# mirror, if so I should detect it and mark the mirror-change as 'mirror_rule_cancelled' or
# some such.
#
#
# build up the command that we will call to mirror this change.
# We are using 'sudo' so that we can run mirrord.pl as an unprivileged user
# and still execute cvs and patch/diff as the person who initially performed
# the checkin.
#
# we pass checkin/mirror metadata (mirror_id and change_id) so that mirror_cmd
# can update the database appropriately. logging is good.
#
# Every value is run through quotemeta so user-controlled text (log messages,
# file names) cannot be interpreted by the shell.
#
my $cmd =
"sudo -u " . quotemeta($user)
. " " . quotemeta($mirror_cmd)
. " --mirror_id=" . quotemeta($mid)
. " --change_id=" . quotemeta($chid)
. " --action=" . quotemeta($action)
. " --user=" . quotemeta($user)
. " --from_branch=" . quotemeta($from_branch)
. " --from_cvsroot=" . quotemeta($from_cvsroot)
. " --to_branch=" . quotemeta($to_branch)
. " --to_cvsroot=" . quotemeta($to_cvsroot)
. " --offset=" . quotemeta($offset)
. " --directory=" . quotemeta($directory)
. " --file=" . quotemeta($file)
. " --oldrev=" . quotemeta($oldrev)
. " --newrev=" . quotemeta($newrev)
. " --log=" . quotemeta($log)
;
#
# execute cmd using our goofy little system wrapper so that we can trap
# runtime errors, capture stdout/stderr, send alert mail, etc.
#
print "\n$cmd\n\n";
my $result = &proc::run($cmd, $paramref);
#print Dumper($result);
#
# associate the log entry (if produced) with this mirror-change
#
&DB::Insert::mirror_change_exec_map( $mid, $chid, $result->{'log_id'}) if defined $result->{'log_id'};
#
# mark the mirror-change status as 'error' if a non-zero exit status was returned, the process
# terminated as the result of receiving a signal, or if it core dumped.
#
if ($result->{'stderr'} || $result->{'exit_value'} || $result->{'signal_num'} || $result->{'dumped_core'}) {
&DB::Update::mirror_change($mid, $chid, 'error');
}
}
#
# get a count of changes that are still marked as pending (likely due to a branch closure) or that
# experienced an error. If no pending or error changes, mark this mirror object as complete.
#
my $not_mirrored = scalar @{&DB::Select::mirror_changes("pending", $mid)};
my $errors = scalar @{&DB::Select::mirror_changes("error", $mid)};
&DB::Update::mirror($mid, 'complete') unless ($not_mirrored || $errors);
#print "*** changes still pending = $not_mirrored\n";
#print "*** changes with errors = $errors\n";
#$totalops += scalar @$mirror_changes;
#print "changes details -- ", Dumper($checkin);
#print "checkins -- ", scalar keys %$checkin, "\n";
#print "changes -- ", scalar keys %$change, "\n";
#print "mirrors -- ", scalar @$mirrors, "\n" if $mirrors;
#print "totalops -- ", $totalops, "\n" if $totalops;
}
} else {
#
# acknowledge the 'pause' command
#
&DB::Update::runtime($runtime->{'response'}, $runtime->{'id'}) if defined $runtime->{'id'};
print "paused...\n";
}
&DB::Util::disconnect();
my $looptime = time - $loopstart;
print "--> mirror loop duration: $looptime seconds.\n";
# Reset the per-iteration metadata caches so stale rows are not reused on
# the next scan.
$checkin = {};
$change = {};
#
# sleep a bit, if we finished before the min_scan_time so that we don't needlessly thrash the db and network
#
if ($looptime < $::min_scan_time) {
print "--> Sleeping for ", $::min_scan_time - $looptime, " seconds.\n\n";
sleep $::min_scan_time - $looptime;
}
}
print "exiting...\n";
&DB::Util::disconnect();
__END__

View File

@@ -1,265 +0,0 @@
package proc;
use strict;
use Data::Dumper;
use Time::HiRes qw(time);
use File::Path;
use FindBin;
use File::Basename;
use Cwd;
use MIME::Lite;
#
# Simulate a try-catch-finally block.  These subs must be declared before the
# rest of the code, or Perl complains about the (&) prototypes. Example syntax:
#
# try {
# <some perl that can fail or that has an "or die">
# } and catch {
# <some perl code to execute if the code in the above try block generated an error>
# } or finally {
# <some code to always execute regardless of errors (i'm not sure when to ever use this)>
# };
#
# the trailing ";" *IS* required, unlike other blocks.
#
# Run a code block inside eval.  If the block dies, page the admin via
# notify() with the error text.  Returns the notify() result on error
# (true) and an empty/false value on success -- the "try {...} and
# catch {...}" chaining below relies on that boolean.
sub try (&) {
    my $block = shift;
    eval { $block->() };
    return &notify("[CVS-mirror]: non-fatal error", $@) if $@;
    return;
}
# Second half of the try/catch idiom: evaluate the handler block, swallow
# any error it itself raises, and always return 0 (false) so a trailing
# "or finally {...}" will still fire.
sub catch (&) {
    my $handler = shift;
    eval { $handler->() };
    return 0;
}
# Always-run block of the try/catch idiom: evaluate the block, swallowing
# any error, and return whatever the eval produced (undef if it died).
sub finally (&) {
    my $cleanup = shift;
    return eval { $cleanup->() };
}
#
# run($cmd, \%options) -- execute a shell command inside a scratch working
# directory, capturing its stdout/stderr into files, and return the outcome
# (stdout, stderr, exit_value, signal_num, dumped_core, log_id).
#
# Options (all optional): tmpdir, workdir, homedir, debug, noop (replace the
# command with "echo"), nomail (suppress failure notification), log_always,
# log_stdout, keep_dir, and "return", which selects the result form:
# ARRAY/LIST (default), ARRAYREF/LISTREF, HASH or HASHREF.
#
# Internal failures (mkpath/chdir/open) add the sentinel 666 to exit_value
# and jump straight to the RETURN block near the bottom.
#
sub run {
my ($cmd, $href) = @_;
my ($return);
my ($tmpdir, $workdir, $homedir, $debug, $noop, $nomail, $log_always, $log_stdout, $keep_dir, $exit_code);
print "#-debug-# ",Dumper($href) if $href->{"debug"};
#
# define defaults for options
#
unless ($tmpdir = $href->{"tmpdir"} ) { $tmpdir = cwd }
unless ($workdir = $href->{"workdir"} ) { $workdir = $tmpdir."/".$FindBin::Script."-".time."-".$$ }
unless ($homedir = $href->{"homedir"} ) { $homedir = cwd }
unless ($debug = $href->{"debug"} ) { $debug = 0 }
unless ($noop = $href->{"noop"} ) { $noop = 0 }
unless ($nomail = $href->{"nomail"} ) { $nomail = 0 }
unless ($log_always = $href->{"log_always"}) { $log_always = 0 }
unless ($log_stdout = $href->{"log_stdout"}) { $log_stdout = 0 }
unless ($keep_dir = $href->{"keep_dir"} ) { $keep_dir = 0 }
unless ($return->{'type'} = $href->{"return"}) { $return->{'type'} = 'array' }
if ($debug) {
print "#-debug-# tmpdir:\t$tmpdir\n";
print "#-debug-# workdir:\t$workdir\n";
print "#-debug-# homedir:\t$homedir\n";
print "#-debug-# debug:\t$debug\n";
print "#-debug-# noop: \t$noop\n";
print "#-debug-# nomail:\t$nomail\n";
print "#-debug-# log_always:\t$log_always\n";
print "#-debug-# log_stdout:\t$log_stdout\n";
print "#-debug-# keep_dir:\t$keep_dir\n";
print "#-debug-# return type:\t$return->{'type'}\n";
}
#
# reformat command: run it in a subshell with stdout/stderr redirected into
# files named "stdout"/"stderr" inside the working directory.  $ocmd keeps
# the original text for logging.
#
my $ocmd = $cmd;
$cmd = "echo" if $noop;
$cmd = "(".$cmd.") 1>stdout 2>stderr";
#
# make workdir and chdir into it
#
print "#-debug-# started from:\t".cwd."\n" if $debug;
unless ($href->{'workdir'} && $keep_dir && -d $workdir) {
try {
mkpath $workdir, $debug or die "\nfailed to create \"$workdir\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to create \"$workdir\"\n";
$return->{'exit_value'} += 666;
goto RETURN;
};
}
try {
chdir $workdir or die "failed to chdir to \"$workdir\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to chdir to \"$workdir\": $!\n";
$return->{'exit_value'} += 666;
goto RETURN;
};
print "#-debug-# working in:\t".cwd."\n" if $debug;
#
# execute command and record exit status.  With mail allowed, try() pages
# the admin when system() reports failure; with nomail set the command is
# run without any notification.
#
print "#-debug-# executing:\t$cmd\n" if $debug;
print ("#-debug-# send mail:\t", $nomail?"no":"yes","\n") if $debug;
unless ($nomail) {
try {
$exit_code = system($cmd) and die "\"$cmd\" failed";
}
} else {
$exit_code = system($cmd);
}
# Decompose the 16-bit wait status returned by system().
$return->{'exit_value'} = $exit_code >> 8;
$return->{'signal_num'} = $exit_code & 127;
$return->{'dumped_core'} = $exit_code & 128;
#
# record STDOUT
#
try {
open(OUT, "<stdout") or die "failed to open \"$workdir/stdout\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to open \"$workdir/stdout\"\n";
$return->{'exit_value'} += 666;
goto RETURN;
};
while (<OUT>) { $return->{'stdout'} .= $_ }
try {
close(OUT) or die "failed to close \"$workdir/stdout\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to close \"$workdir/stdout\"\n";
};
try {
unlink "stdout" or die "failed to delete \"$workdir/stdout\"\n";
} and catch {
$return->{'stderr'} .= "\nfailed to delete \"$workdir/stdout\"\n";
};
#
# record STDERR
#
try {
open(ERR, "<stderr") or die "failed to open \"$workdir/stderr\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to open \"$workdir/stderr\"\n";
$return->{'exit_value'} += 666;
goto RETURN;
};
while (<ERR>) { $return->{'stderr'} .= $_ }
try {
close(ERR) or die "failed to close \"$workdir/stderr\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to close \"$workdir/stderr\"\n";
};
try {
unlink "stderr" or die "failed to delete \"$workdir/stderr\"\n";
} and catch {
$return->{'stderr'} .= "\nfailed to delete \"$workdir/stderr\"\n";
};
#
# return to homedir
#
try {
chdir $homedir or die "failed to chdir to \"$homedir\": $!\n";
} and catch {
$return->{'stderr'} .= "\nfailed to chdir to \"$homedir\"\n";
};
print "#-debug-# returned to:\t".cwd."\n" if $debug;
#
# cleanup workdir unless we're directed to keep it
#
unless ($keep_dir) {
try { rmtree $workdir or die "failed to delete \"$workdir\": $!\n" };
}
#
# Return stdout, stderr, and exit status in the form requested.
# Complain and die if an invalid form was requested.
#
# NOTE(review): in the elsif below, "&&" binds tighter than "||", so the
# condition parses as ($log_stdout && stdout) || stderr || ... -- not the
# grouped form the layout suggests.  Harmless in practice only because the
# following elsif logs the same error cases anyway; confirm before relying
# on log_stdout semantics.
#
RETURN:
if ($log_always) {
$return->{'log_id'} = &log($ocmd, $return);
} elsif ($log_stdout &&
$return->{'stdout'} ||
$return->{'stderr'} ||
$return->{'exit_value'} ||
$return->{'signal_num'} ||
$return->{'dumped_core'}) {
$return->{'log_id'} = &log($ocmd, $return);
} elsif ($return->{'stderr'} ||
$return->{'exit_value'} ||
$return->{'signal_num'} ||
$return->{'dumped_core'}) {
$return->{'log_id'} = &log($ocmd, $return);
} else {
# don't log anything
}
if ($return->{'type'} =~ /^array$|^list$/i) {
return (
$return->{'stdout'},
$return->{'stderr'},
$return->{'exit_value'},
$return->{'signal_num'},
$return->{'dumped_core'},
$return->{'log_id'}
);
} elsif ($return->{'type'} =~ /^arrayref$|^listref$/i) {
return [
$return->{'stdout'},
$return->{'stderr'},
$return->{'exit_value'},
$return->{'signal_num'},
$return->{'dumped_core'},
$return->{'log_id'}
];
} elsif ($return->{'type'} =~ /^hashref$/i) {
delete $return->{'type'};
return $return ;
} elsif ($return->{'type'} =~ /^hash$/i) {
delete $return->{'type'};
return %$return ;
} else {
try { die (
"Invalid return type requested ($return->{'type'}).\n",
"Valid return types are:\n",
"\tARRAY or LIST (default)\n",
"\tHASH\n",
"\tARRAYREF or LISTREF\n",
"\tHASHREF\n\n",
);
};
}
}
# Persist the outcome of an executed command to the exec_log table and
# return the new log row's id.
sub log {
    my ($command, $result) = @_;
    my @fields = @{$result}{qw(stdout stderr exit_value signal_num dumped_core)};
    return &DB::Insert::exec_log($command, @fields);
}
# Send an alert mail to the configured pager address.  Returns the result
# of MIME::Lite's send().
sub notify {
    #
    # TODO: make the from/to headers variables
    #
    my ($subject, $body) = @_;
    return MIME::Lite->new(
        From      => $default::admin_address,
        To        => $default::pager_address,
        Subject   => $subject,
        Datestamp => 0,
        Data      => $body
    )->send();
}
# Count the addcheckin.pl processes currently running.  The "[a]" character
# class keeps the grep from matching its own command line in the ps output.
sub addcheckin_count {
    chomp(my $running = `ps -ef | grep -c "[a]ddcheckin\.pl"`);
    return $running;
}
return 1;

View File

@@ -1,19 +0,0 @@
# phpMyAdmin MySQL-Dump
# version 2.2.1
# http://phpwizard.net/phpMyAdmin/
# http://phpmyadmin.sourceforge.net/ (download page)
#
# Host: bonsai2
# Generation Time: Feb 12, 2002 at 10:02 PM
# Server version: 3.23.46
# PHP Version: 4.0.3pl1
# Database : `bonsai`
#
# Dumping data for table `mh_command`
# Seed rows: the control commands the MirrorHandler daemon accepts via
# its runtime-info table.
#
INSERT INTO `mh_command` VALUES (1, 'run');
INSERT INTO `mh_command` VALUES (2, 'pause');
INSERT INTO `mh_command` VALUES (3, 'exit');

View File

@@ -1,32 +0,0 @@
# phpMyAdmin MySQL-Dump
# version 2.2.1
# http://phpwizard.net/phpMyAdmin/
# http://phpmyadmin.sourceforge.net/ (download page)
#
# Host: bonsai2
# Generation Time: Feb 12, 2002 at 10:00 PM
# Server version: 3.23.46
# PHP Version: 4.0.3pl1
# Database : `bonsai`
#
# Dumping data for table `status`
# Seed rows: mirror / mirror_change states.  Several of these values are
# referenced by name from the mirror daemon (e.g. 'pending', 'error',
# 'to_branch_eol', 'complete'), so the strings must not be changed.
#
INSERT INTO `status` VALUES (1, 'pending');
INSERT INTO `status` VALUES (2, 'nomirror');
INSERT INTO `status` VALUES (3, 'complete');
INSERT INTO `status` VALUES (4, 'cvs_merge');
INSERT INTO `status` VALUES (5, 'error');
INSERT INTO `status` VALUES (6, 'to_branch_eol');
INSERT INTO `status` VALUES (7, 'missing');
INSERT INTO `status` VALUES (8, 'prev_add_same');
INSERT INTO `status` VALUES (9, 'prev_rm');
INSERT INTO `status` VALUES (10, 'prev_add_different');
INSERT INTO `status` VALUES (11, 'conflict');
INSERT INTO `status` VALUES (12, 'diff_patch');
INSERT INTO `status` VALUES (13, 'noop');
INSERT INTO `status` VALUES (14, 'diff3_merge');
INSERT INTO `status` VALUES (15, 'non_merge_overwrite');
INSERT INTO `status` VALUES (16, 'building_mirror');

View File

@@ -1,377 +0,0 @@
# phpMyAdmin MySQL-Dump
# version 2.2.1
# http://phpwizard.net/phpMyAdmin/
# http://phpmyadmin.sourceforge.net/ (download page)
#
# Host: bonsai2
# Generation Time: Feb 12, 2002 at 09:31 PM
# Server version: 3.23.46
# PHP Version: 4.0.3pl1
# Database : `bonsai`
# --------------------------------------------------------
#
# Table structure for table `accessconfig`
#
CREATE TABLE `accessconfig` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` timestamp(14) NOT NULL,
`cvsroot_id` int(10) unsigned NOT NULL default '0',
`rev` varchar(128) NOT NULL default '',
`value` mediumtext NOT NULL,
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `branch`
#
CREATE TABLE `branch` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(64) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `change`
#
CREATE TABLE `change` (
`id` int(10) unsigned NOT NULL auto_increment,
`checkin_id` int(10) unsigned NOT NULL default '0',
`file_id` int(10) unsigned NOT NULL default '0',
`oldrev` varchar(128) NOT NULL default '',
`newrev` varchar(128) NOT NULL default '',
`branch_id` int(10) unsigned NOT NULL default '0',
PRIMARY KEY (`id`),
KEY `checkin_id` (`checkin_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `checkin`
#
CREATE TABLE `checkin` (
`id` int(10) unsigned NOT NULL auto_increment,
`user_id` int(10) unsigned NOT NULL default '0',
`time` int(10) unsigned NOT NULL default '0',
`directory_id` int(10) unsigned NOT NULL default '0',
`log_id` int(10) unsigned NOT NULL default '0',
`cvsroot_id` int(10) unsigned NOT NULL default '0',
PRIMARY KEY (`id`),
KEY `time` (`time`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `cvsroot`
#
CREATE TABLE `cvsroot` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(128) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `cvsroot_branch_map_eol`
#
CREATE TABLE `cvsroot_branch_map_eol` (
`cvsroot_id` int(10) unsigned NOT NULL default '0',
`branch_id` int(10) unsigned NOT NULL default '0',
`timestamp` timestamp(14) NOT NULL,
PRIMARY KEY (`cvsroot_id`,`branch_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `directory`
#
CREATE TABLE `directory` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(128) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `exec_log`
#
CREATE TABLE `exec_log` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` int(10) unsigned NOT NULL default '0',
`command` text NOT NULL,
`stdout` mediumtext,
`stderr` mediumtext,
`exit_value` smallint(5) unsigned default '0',
`signal_num` tinyint(3) unsigned default '0',
`dumped_core` tinyint(3) unsigned default '0',
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `expanded_accessconfig`
#
CREATE TABLE `expanded_accessconfig` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` timestamp(14) NOT NULL,
`value` mediumtext NOT NULL,
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `expanded_mirrorconfig`
#
CREATE TABLE `expanded_mirrorconfig` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` timestamp(14) NOT NULL,
`value` mediumtext NOT NULL,
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `file`
#
CREATE TABLE `file` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(128) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `group`
#
CREATE TABLE `group` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(64) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `group_user_map`
#
CREATE TABLE `group_user_map` (
`group_id` int(10) unsigned NOT NULL default '0',
`user_id` int(10) unsigned NOT NULL default '0'
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `log`
#
CREATE TABLE `log` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` text NOT NULL,
PRIMARY KEY (`id`),
KEY `value` (`value`(25))
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `loginfo_performance`
#
CREATE TABLE `loginfo_performance` (
`checkin_id` int(10) unsigned NOT NULL default '0',
`time` float unsigned NOT NULL default '0',
PRIMARY KEY (`checkin_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mh_command`
#
CREATE TABLE `mh_command` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(32) NOT NULL default '',
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mh_hostname`
#
CREATE TABLE `mh_hostname` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(64) NOT NULL default '',
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mh_runtime_info`
#
CREATE TABLE `mh_runtime_info` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` int(10) unsigned NOT NULL default '0',
`last_update` timestamp(14) NOT NULL,
`mh_hostname_id` int(10) unsigned NOT NULL default '0',
`mh_command_id` int(10) unsigned NOT NULL default '0',
`mh_command_response` int(10) unsigned NOT NULL default '0',
`mirror_delay` smallint(5) unsigned NOT NULL default '0',
`min_scan_time` smallint(5) unsigned NOT NULL default '0',
`throttle_time` smallint(5) unsigned NOT NULL default '0',
`max_addcheckins` smallint(5) unsigned NOT NULL default '0',
PRIMARY KEY (`id`),
KEY `id` (`id`,`time`,`mh_hostname_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mirror`
#
CREATE TABLE `mirror` (
`id` int(10) unsigned NOT NULL auto_increment,
`checkin_id` int(10) unsigned NOT NULL default '0',
`branch_id` int(10) unsigned NOT NULL default '0',
`cvsroot_id` int(10) unsigned NOT NULL default '0',
`offset_id` int(10) unsigned NOT NULL default '0',
`status_id` int(10) unsigned NOT NULL default '0',
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mirror_change_exec_map`
#
CREATE TABLE `mirror_change_exec_map` (
`mirror_id` int(10) unsigned NOT NULL default '0',
`change_id` int(10) unsigned NOT NULL default '0',
`exec_log_id` int(10) unsigned NOT NULL default '0'
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mirror_change_map`
#
CREATE TABLE `mirror_change_map` (
`mirror_id` int(10) unsigned NOT NULL default '0',
`change_id` int(10) unsigned NOT NULL default '0',
`type_id` int(10) unsigned NOT NULL default '0',
`status_id` int(10) unsigned NOT NULL default '0',
KEY `mirror_id` (`mirror_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `mirrorconfig`
#
CREATE TABLE `mirrorconfig` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` timestamp(14) NOT NULL,
`cvsroot_id` int(10) unsigned NOT NULL default '0',
`rev` varchar(128) NOT NULL default '',
`value` mediumtext NOT NULL,
PRIMARY KEY (`id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `modules`
#
CREATE TABLE `modules` (
`id` int(10) unsigned NOT NULL auto_increment,
`time` timestamp(14) NOT NULL,
`cvsroot_id` int(10) unsigned NOT NULL default '0',
`rev` varchar(128) NOT NULL default '',
`value` mediumtext NOT NULL,
PRIMARY KEY (`id`),
KEY `cvsroot_id` (`cvsroot_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `offset`
#
CREATE TABLE `offset` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(128) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `status`
#
CREATE TABLE `status` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(32) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `temp_commitinfo`
#
CREATE TABLE `temp_commitinfo` (
`cwd` varchar(255) NOT NULL default '',
`user_id` int(10) unsigned NOT NULL default '0',
`time` int(10) unsigned NOT NULL default '0',
`directory_id` int(10) unsigned NOT NULL default '0',
`cvsroot_id` int(10) unsigned NOT NULL default '0',
`files` text NOT NULL,
`status` varchar(32) NOT NULL default '',
PRIMARY KEY (`cwd`,`user_id`,`time`,`directory_id`,`cvsroot_id`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `type`
#
CREATE TABLE `type` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(16) binary NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;
# --------------------------------------------------------
#
# Table structure for table `user`
#
CREATE TABLE `user` (
`id` int(10) unsigned NOT NULL auto_increment,
`value` varchar(32) NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `value` (`value`)
) TYPE=MyISAM;

View File

@@ -1,804 +0,0 @@
#!/usr/bin/perl -w
# cvsblame.cgi -- cvsblame with logs as popups and allowing html in comments.
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
# Created: Steve Lamm <slamm@netscape.com>, 12-Sep-97.
# Modified: Marc Byrd <byrd@netscape.com> , 19971030.
#
# Arguments (passed via GET or POST):
# file - path to file name (e.g. ns/cmd/xfe/Makefile)
# root - cvs root (e.g. /warp/webroot)
# - default includes /m/src/ and /h/rodan/cvs/repository/1.0
# rev - revision (default is the latest version)
# line_nums - boolean for line numbers on/off (use 1,0).
# (1,on by default)
# use_html - boolean for html comments on/off (use 1,0).
# (0,off by default)
# sanitize - path to sanitization dictionary
# (e.g. /warp2/webdoc/projects/bonsai/dictionary/sanitization.db)
# mark - highlight a line
#
use strict;
# Shut up misguided -w warnings about "used only once". "use vars" just
# doesn't work for me.
# Each package variable is assigned to a throwaway lexical so that -w sees
# a second "use" and stops warning about "used only once".  Never called
# for its value or its effect.
sub sillyness {
my $zz;
$zz = $::progname;
$zz = $::revision_ctime;
$zz = $::revision_log;
}
require 'CGI.pl';
require 'cvsblame.pl';
# Cope with the cookie and print the header, first thing. That way, if
# any errors result, they will show up for the user.
print "Content-Type:text/html\n";
if ($ENV{REQUEST_METHOD} eq 'POST' and defined $::FORM{set_line}) {
# Expire the cookie 5 months from now
print "Set-Cookie: line_nums="
. &ExpectOnOff("set_line", $::FORM{set_line}) . "; expires="
. &toGMTString(time + 86400 * 152) . "; path=/\n";
}
# Some Globals
#
my $Head = 'CVS Blame';
my $SubHead = '';
my @src_roots = getRepositoryList();
# Init byrd's 'feature' to allow html in comments
#
my $opt_html_comments = &html_comments_init();
# Handle the "file" argument: path of the file to annotate, relative to the
# cvs root.  No file means print the usage page and stop.
#
my $filename = '';
$filename = $::FORM{file} if defined $::FORM{file};
if ($filename eq '')
{
print "\n";
&print_usage;
exit;
}
my ($file_head, $file_tail) = $filename =~ m@(.*/)?(.+)@;
$file_head = '' if !defined($file_head);
my $url_filename = url_quote($filename);
my $url_file_tail = url_quote($file_tail);
# Handle the "rev" argument; 'HEAD' is treated the same as no revision.
#
$::opt_rev = '';
$::opt_rev = &SanitizeRevision($::FORM{rev}) if
defined $::FORM{rev} and $::FORM{rev} ne 'HEAD';
my $revstr = '';
$revstr = "&rev=$::opt_rev" unless $::opt_rev eq '';
my $browse_revtag = 'HEAD';
$browse_revtag = $::opt_rev if ($::opt_rev =~ /[A-Za-z]/);
my $revision = '';
# Handle the "root" argument: an explicit cvs root is validated and then
# searched first.
#
my $root = $::FORM{root};
if (defined $root and $root ne '') {
$root =~ s|/$||;
validateRepository($root);
if (-d $root) {
unshift(@src_roots, $root);
} else {
print "\n";
&print_top;
print "Error: Root, " . &html_quote($root) .
", is not a directory.<BR><BR>\n";
print "</BODY></HTML>\n";
&print_bottom;
exit;
}
}
# Find the rcs (,v) file, checking each root and that root's Attic.
#
my $rcs_filename;
my $found_rcs_file = 0;
foreach (@src_roots) {
$root = $_;
$rcs_filename = "$root/$filename,v";
$rcs_filename = Fix_BonsaiLink($rcs_filename);
$found_rcs_file = 1, last if -r $rcs_filename;
$rcs_filename = "$root/${file_head}Attic/$file_tail,v";
$found_rcs_file = 1, last if -r $rcs_filename;
}
unless ($found_rcs_file) {
print "\n";
&print_top;
my $escaped_filename = html_quote($filename);
my $shell_filename = shell_escape($filename);
print STDERR "cvsblame.cgi: Rcs file, $shell_filename, does not exist.\n";
print "Invalid filename: $escaped_filename.\n";
&print_bottom;
exit;
}
&ChrootFilename($root, $rcs_filename);
my $rcs_path;
($rcs_path) = $rcs_filename =~ m@$root/(.*)/.+?,v@;
# Parse the rcs file ($::opt_rev is passed as a global)
#
$revision = &parse_cvs_file($rcs_filename);
my $file_rev = $revision;
my @text = &extract_revision($revision);
if ($#text != $#::revision_map) {
print "\n";
die "$::progname: Internal consistency error"
}
# Raw data opt (so other scripts can parse and play with the data)
if (defined $::FORM{data}) {
print "\n";
&print_raw_data;
exit;
}
# Caching headers: Last-Modified from the revision's commit time; the page
# expires 20 minutes out.
print "Last-Modified: ".time2str("%a, %d %b %Y %T %Z", str2time($::revision_ctime{$::opt_rev}), "GMT")."\n";
print "Expires: ".time2str("%a, %d %b %Y %T %Z", time+1200, "GMT")."\n";
print "\n";
#ENDHEADERS!!
# Handle the "line_nums" argument: the cookie overrides the default, and an
# explicit form value overrides the cookie.
#
my $opt_line_nums = 1;
if (defined $::COOKIE{line_nums}) {
$opt_line_nums = 0 if $::COOKIE{line_nums} eq 'off';
$opt_line_nums = 1 if $::COOKIE{line_nums} eq 'on';
}
if (defined $::FORM{line_nums}) {
$opt_line_nums = 0 if $::FORM{line_nums} =~ /off|no|0/i;
$opt_line_nums = 1 if $::FORM{line_nums} =~ /on|yes|1/i;
}
# Option to make links to included files (form: includes=on|yes|1).
my $opt_includes = 0;
$opt_includes = 1 if defined $::FORM{includes} and
$::FORM{includes} =~ /on|yes|1/i;
# Only C/C++ sources get #include markup.  The original line read
# "$opt_includes = 1 if $opt_includes and $file_tail =~ /(.c|.h|.cpp)$/;"
# which was a no-op (it could only set a flag that was already set) and
# used unescaped dots in the regex; restrict the flag as clearly intended.
$opt_includes = 0 unless $file_tail =~ /\.(c|h|cpp)$/;
my $use_html = 0;
$use_html = 1 if defined $::FORM{use_html} and $::FORM{use_html} eq '1';
# Handle the "mark" argument
# Syntax: comma-separated line numbers and open/closed ranges, e.g.
# "5,10-20,30-".  Range endpoints are stored as begin/end markers keyed by
# line number; bare numbers are marked 'single'.
#
my %mark_line;
my $mark_arg = '';
$mark_arg = &SanitizeMark($::FORM{mark}) if defined $::FORM{mark};
foreach my $mark (split ',', $mark_arg) {
my ($begin, $end);
if ($mark =~ m/^(\d*)-(\d*)$/) {
$begin = $1;
$end = $2;
$begin = 1 if $begin eq '';
$end = $#text + 1 if $end eq '' or $end > $#text + 1;
next if $begin >= $end;
$mark_line{$begin} = 'begin';
$mark_line{$end} = 'end';
} else {
$mark_line{$mark} = 'single';
}
}
# Start printing out the page
#
&print_top;
print Param('bannerhtml', 1);
# Print link at top for directory browsing
#
print q(
<TABLE BORDER=0 CELLPADDING=5 CELLSPACING=0 WIDTH="100%">
<TR>
<TD ALIGN=LEFT VALIGN=CENTER>
<NOBR><FONT SIZE="+2"><B>
CVS Blame
</B></FONT></NOBR>
<BR><B>
);
# Each path component links to the LXR cross-reference for that directory.
my $link_path = "";
foreach my $path (split('/',$rcs_path)) {
# Customize this translation
$link_path .= url_encode2($path).'/';
my $lxr_path = Fix_LxrLink($link_path);
print "<A HREF='$lxr_path'>$path</a>/ ";
}
my $lxr_path = Fix_LxrLink("$link_path$file_tail");
print "<A HREF='$lxr_path'>$file_tail</a> ";
my $graph_cell = Param('cvsgraph') ? <<"--endquote--" : "";
</TR><TR>
<TD NOWRAP>
<A HREF="cvsgraph.cgi?file=$url_filename">Revision Graph</A>
</TD>
--endquote--
print " (<A HREF='cvsblame.cgi?file=$url_filename&rev=$revision&root=$root'";
print " onmouseover='return log(event,\"$::prev_revision{$revision}\",\"$revision\");'" if $::use_layers;
print " onmouseover=\"showMessage('$revision','top')\" id=\"line_top\"" if $::use_dom;
print ">";
print "$browse_revtag:" unless $browse_revtag eq 'HEAD';
print $revision if $revision;
print "</A>)";
print qq(
</B>
</TD>
<TD ALIGN=RIGHT VALIGN=TOP WIDTH="1%">
<TABLE BORDER CELLPADDING=10 CELLSPACING=0>
<TR>
<TD NOWRAP BGCOLOR="#FAFAFA">
<TABLE BORDER=0 CELLPADDING=0 CELLSPACING=0>
<TR>
<TD NOWRAP>
<A HREF="$lxr_path">LXR: Cross Reference</A>
</TD>
</TR><TR>
<TD NOWRAP>
<A HREF="cvslog.cgi?file=$url_filename$revstr">Full Change Log</A>
</TD>
$graph_cell
</TR>
</TABLE>
</TD>
</TR>
</TABLE>
</TD>
</TR>
</TABLE>
);
my $open_table_tag =
'<TABLE BORDER=0 CELLPADDING=0 CELLSPACING=0 WIDTH="100%">';
print "$open_table_tag<TR><TD colspan=3><PRE>";
# Print each line of the revision, preceded by its annotation.
#
my $count = $#::revision_map;
if ($count <= 0) {
$count = 1;
}
my $line_num_width = int(log($count)/log(10)) + 1;
my $revision_width = 3;
my $author_width = 5;
my $line = 0;
my %usedlog;
$usedlog{$revision} = 1;
my $old_revision = 0;
my $row_color = '';
my $lines_in_table = 0;
my $inMark = 0;
my $rev_count = 0;
foreach $revision (@::revision_map)
{
my $text = $text[$line++];
$usedlog{$revision} = 1;
$lines_in_table++;
if ($opt_html_comments) {
# Don't escape HTML in C/C++ comments
$text = &leave_html_comments($text);
} else {
$text =~ s/&/&amp;/g;
$text =~ s/</&lt;/g;
$text =~ s/>/&gt;/g;
}
# Add a link to traverse to included files
$text = &link_includes($text) if $opt_includes;
my $output = '';
# Highlight lines: open a green row when a mark begins or a single line
# is marked.
my $mark_cmd;
if (defined($mark_cmd = $mark_line{$line}) and $mark_cmd ne 'end') {
$output .= '</TD></TR><TR><TD BGCOLOR=LIGHTGREEN WIDTH="100%"><PRE>';
$inMark = 1;
}
# Alternate the row background each time the blamed revision changes, and
# start a fresh <TABLE> every ~100 rows so browsers render incrementally.
if ($old_revision ne $revision and $line != 1) {
if ($row_color eq '') {
$row_color=' BGCOLOR="#e7e7e7"';
} else {
$row_color='';
}
if (not $inMark) {
if ($lines_in_table > 100) {
$output .= "</TD></TR></TABLE>$open_table_tag<TR><TD colspan=3$row_color><PRE>";
$lines_in_table=0;
} else {
$output .= "</TD></TR><TR><TD colspan=3$row_color><PRE>";
}
}
} elsif ($lines_in_table > 200 and not $inMark) {
$output .= "</TD></TR></TABLE>$open_table_tag<TR><TD colspan=3$row_color><PRE>";
$lines_in_table=0;
}
$output .= "<A NAME=$line></A>";
$output .= sprintf("%${line_num_width}s ", $line) if $opt_line_nums;
# Emit the author/revision link only when the revision changes, or again
# after 20 lines within one long unchanged run.
if ($old_revision ne $revision or $rev_count > 20) {
$revision_width = max($revision_width,length($revision));
if ($::prev_revision{$revision}) {
$output .= "<A HREF=\"cvsview2.cgi?diff_mode=context&whitespace_mode=show&root=$root&subdir=$rcs_path&command=DIFF_FRAMESET&file=$url_file_tail&rev2=$revision&rev1=$::prev_revision{$revision}\"";
} else {
$output .= "<A HREF=\"cvsblame.cgi?file=$url_filename&rev=$revision&root=$root\"";
}
$output .= " onmouseover='return log(event,\"$::prev_revision{$revision}\",\"$revision\");'" if $::use_layers;
$output .= " onmouseover=\"showMessage('$revision','$line')\" id=\"line_$line\"" if $::use_dom;
$output .= ">";
my $author = $::revision_author{$revision};
$author =~ s/%.*$//;
$author_width = max($author_width,length($author));
$output .= sprintf("%-${author_width}s ", $author);
$output .= "$revision</A> ";
$output .= ' ' x ($revision_width - length($revision));
$old_revision = $revision;
$rev_count = 0;
} else {
$output .= ' ' . ' ' x ($author_width + $revision_width);
}
$rev_count++;
$output .= "$text";
# Close the highlighted section
if (defined $mark_cmd and $mark_cmd ne 'begin') {
chop($output);
$output .= "</TD></TR><TR><TD colspan=3$row_color><PRE>";
$inMark = 0;
}
print $output;
}
print "</TD></TR></TABLE>\n";
if ($::use_layers || $::use_dom) {
# Write out cvs log messages as a JS variables
# or hidden <div>'s
print qq|<SCRIPT $::script_type><!--\n| if $::use_layers;
while (my ($revision, $junk) = each %usedlog) {
# Create a safe variable name for a revision log
# (JS identifiers cannot contain ".", so revision 1.2.3 becomes 1_2_3)
my $revisionName = $revision;
$revisionName =~ tr/./_/;
my $log = $::revision_log{$revision};
$log =~ s/([^\n\r]{80})([^\n\r]*)/$1\n$2/g if $::use_layers;
$log = html_quote($log);
$log = MarkUpText($log);
$log =~ s/\n|\r|\r\n/<BR>/g;
$log =~ s/"/\\"/g if $::use_layers;
# Write JavaScript variable for log entry (e.g. log1_1 = "New File")
my $author = $::revision_author{$revision};
$author =~ tr/%/@/;
my $author_email = EmailFromUsername($author);
print "<div id=\"rev_$revision\" class=\"log_msg\" style=\"display:none\">" if $::use_dom;
print "log$revisionName = \"" if $::use_layers;
print "<b>$revision</b> &lt;<a href='mailto:$author_email'>$author</a>&gt;"
." <b>$::revision_ctime{$revision}</b><BR>"
."<SPACER TYPE=VERTICAL SIZE=5>$log";
print "\";\n" if $::use_layers;
print "</div>\n" if $::use_dom;
}
print "//--></SCRIPT>" if $::use_layers;
}
&print_bottom;
## END of main script
# Return the larger of two numeric values.
sub max {
    my ($x, $y) = @_;
    return $x >= $y ? $x : $y;
}
# Emit the HTML <HEAD> and opening <BODY> of the blame page, including the
# browser-specific log-popup machinery: Netscape 4 <LAYER> popups when
# $::use_layers is set, DOM/CSS <div> popups when $::use_dom is set, or a
# plain body when neither is.  Reads file-level state: $file_tail,
# $browse_revtag, $revision.
sub print_top {
    my ($title_text) = "for " . &html_quote($file_tail) . " (";
    $title_text .= "$browse_revtag:" unless $browse_revtag eq 'HEAD';
    $title_text .= $revision if $revision;
    $title_text .= ")";
    $title_text =~ s/\(\)//;    # drop empty parens when neither tag nor revision applied
    $| = 1;    # unbuffered output so the page starts rendering immediately
    print "<HTML><HEAD><TITLE>CVS Blame $title_text</TITLE>";
    # Netscape 4 layers variant: log() positions the 'popup' layer beside the
    # hovered link; the invisible 'popup_guide' layer only measures link width.
    print <<__TOP__ if $::use_layers;
<SCRIPT $::script_type><!--
var event = 0; // Nav3.0 compatibility
document.loaded = false;
function finishedLoad() {
if (parseInt(navigator.appVersion) < 4 ||
navigator.userAgent.toLowerCase().indexOf("msie") != -1) {
return true;
}
document.loaded = true;
document.layers['popup'].visibility='hide';
return true;
}
function revToName (rev) {
revName = rev + "";
revArray = revName.split(".");
return revArray.join("_");
}
function log(event, prev_rev, rev) {
if (parseInt(navigator.appVersion) < 4 ||
navigator.userAgent.toLowerCase().indexOf("msie") != -1) {
return true;
}
var l = document.layers['popup'];
var guide = document.layers['popup_guide'];
if (event.target.text.length > max_link_length) {
max_link_length = event.target.text.length;
guide.document.write("<PRE>" + event.target.text);
guide.document.close();
popup_offset = guide.clip.width;
}
if (document.loaded) {
l.document.write("<TABLE BORDER=0 CELLSPACING=0 CELLPADDING=3><TR><TD BGCOLOR=#F0A000>");
l.document.write("<TABLE BORDER=0 CELLSPACING=0 CELLPADDING=6><TR><TD BGCOLOR=#FFFFFF><tt>");
l.document.write(eval("log" + revToName(rev)) + "</TD></TR></TABLE>");
l.document.write("</td></tr></table>");
l.document.close();
}
if(event.target.y > window.innerHeight + window.pageYOffset - l.clip.height) {
l.top = (window.innerHeight + window.pageYOffset - (l.clip.height + 15));
} else {
l.top = event.target.y - 9;
}
l.left = event.target.x + popup_offset;
l.visibility="show";
return true;
}
file_tail = "$file_tail";
popup_offset = 5;
max_link_length = 0;
initialLayer = "<TABLE BORDER=0 CELLSPACING=0 CELLPADDING=3><TR><TD BGCOLOR=#F0A000><TABLE BORDER=0 CELLSPACING=0 CELLPADDING=6><TR><TD BGCOLOR=#FFFFFF><B>Page loading...please wait.</B></TD></TR></TABLE></td></tr></table>";
//--></SCRIPT>
</HEAD>
<BODY onLoad="finishedLoad();" BGCOLOR="#FFFFFF" TEXT="#000000" LINK="#0000EE" VLINK="#551A8B" ALINK="#F0A000">
<LAYER SRC="javascript:initialLayer" NAME='popup' onMouseOut="this.visibility='hide';" LEFT=0 TOP=0 BGCOLOR='#FFFFFF' VISIBILITY='hide'></LAYER>
<LAYER SRC="javascript:initialLayer" NAME='popup_guide' onMouseOut="this.visibility='hide';" LEFT=0 TOP=0 VISIBILITY='hide'></LAYER>
__TOP__
    # DOM variant: showMessage() absolutely positions the hidden per-revision
    # <div> (id "rev_<n>", written by the page epilogue) beside the clicked line.
    print <<__TOP__ if $::use_dom;
<script $::script_type><!--
var r
function showMessage(rev,line) {
if (r) {
r.style.display='none'
}
r = document.getElementById('rev_'+rev)
if (!r)
return
var l = document.getElementById('line_'+line)
var t = l.offsetTop
var p = l.offsetParent
while (p.tagName != 'BODY') {
t = t + p.offsetTop
p = p.offsetParent
}
r.style.top = t
r.style.left = l.offsetLeft + l.offsetWidth + 20
r.style.display=''
}
function hideMessage() {
if (r) {
r.style.display='none'
}
}
//--></script>
<style type="text/css">
body {
background-color: white;
color: black;
}
a:link {
color: blue;
}
a:visited {
color: purple;
}
a:active {
color: orange;
}
.log_msg {
border-style: solid;
border-color: #F0A000;
background-color: #FFFFFF;
padding: 5;
position: absolute;
}
pre {
margin: 0;
}
</style>
</head>
<body onclick="hideMessage()">
__TOP__
    # Plain fallback body when neither popup mechanism is in use.
    print '<BODY BGCOLOR="#FFFFFF" TEXT="#000000" LINK="#0000EE" VLINK="#551A8B" ALINK="#F0A000">' if not ($::use_layers || $::use_dom);
} # print_top
# Print the self-documenting usage page: the query-parameter table, example
# links, and the line-number cookie form.  On a POSTed set_line toggle it
# computes (but, historically, never sends) a line_nums cookie string.
sub print_usage {
    my ($linenum_message) = '';
    my ($new_linenum, $src_roots_list);
    my ($title_text) = "Usage";
    if ($ENV{REQUEST_METHOD} eq 'POST' and defined $::FORM{set_line}) {
        # Expire the cookie 5 months from now
        my $set_cookie = "Set-Cookie: line_nums="
            . &ExpectOnOff("set_line", $::FORM{set_line}) . "; expires="
            .&toGMTString(time + 86400 * 152)."; path=/";
        # XXX Hey, nothing is done with this handy cookie string! ### XXX
    }
    # Decide which state to report and which state the submit button offers.
    if ( not defined $::COOKIE{line_nums} and not defined $::FORM{set_line}) {
        $new_linenum = 'on';
    } elsif ((defined($::COOKIE{line_nums}) && $::COOKIE{line_nums} eq 'off')
             # Bug fix: this disjunct used to test defined($::FORM{line_nums})
             # while comparing $::FORM{set_line}, so a POSTed set_line=off was
             # reported as "on" unless an unrelated line_nums param was present.
             or (defined($::FORM{set_line}) && $::FORM{set_line} eq 'off')) {
        $linenum_message = 'Line numbers are currently <b>off</b>.';
        $new_linenum = 'on';
    } else {
        $linenum_message = 'Line numbers are currently <b>on</b>.';
        $new_linenum = 'off';
    }
    $src_roots_list = join('<BR>', @src_roots);
    print <<__USAGE__;
<HTML>
<HEAD>
<TITLE>CVS Blame $title_text</TITLE>
</HEAD><BODY>
<H2>CVS Blame Usage</H2>
Add parameters to the query string to view a file.
<P>
<TABLE BORDER CELLPADDING=3>
<TR ALIGN=LEFT>
<TH>Param</TH>
<TH>Default</TH>
<TH>Example</TH>
<TH>Description</TH>
</TR><TR>
<TD>file</TD>
<TD>--</TD>
<TD>ns/cmd/Makefile</TD>
<TD>path to file name</TD>
</TR><TR>
<TD>root</TD>
<TD>$src_roots_list</TD>
<TD>/warp/webroot</TD>
<TD>cvs root</TD>
</TR><TR>
<TD>rev</TD>
<TD>HEAD</TD>
<TD>1.3
<BR>ACTRA_branch</TD>
<TD>revision</TD>
</TR><TR>
<TD>line_nums</TD>
<TD>on *</TD>
<TD>on
<BR>off</TD>
<TD>line numbers</TD>
</TR><TR>
<TD>#&lt;line_number&gt;</TD>
<TD>--</TD>
<TD>#111</TD>
<TD>jump to a line</TD>
</TR>
</TABLE>
<P>Examples:
<TABLE><TR><TD>&nbsp;</TD><TD>
<A HREF="cvsblame.cgi?file=ns/cmd/Makefile">
cvsblame.cgi?file=ns/cmd/Makefile</A>
</TD></TR><TR><TD>&nbsp;</TD><TD>
<A HREF="cvsblame.cgi?file=ns/cmd/xfe/mozilla.c&rev=Dogbert4xEscalation_BRANCH">
cvsblame.cgi?file=ns/cmd/xfe/mozilla.c&amp;rev=Dogbert4xEscalation_BRANCH</A>
</TD></TR><TR><TD>&nbsp;</TD><TD>
<A HREF="cvsblame.cgi?file=projects/bonsai/cvsblame.cgi&root=/warp/webroot">
cvsblame.cgi?file=projects/bonsai/cvsblame.cgi&root=/warp/webroot</A>
</TD></TR><TR><TD>&nbsp;</TD><TD>
<A HREF="cvsblame.cgi?file=ns/config/config.mk&line_nums=on">
cvsblame.cgi?file=ns/config/config.mk&amp;line_nums=on</A>
</TD></TR><TR><TD>&nbsp;</TD><TD>
<A HREF="cvsblame.cgi?file=ns/cmd/xfe/dialogs.c#2384">
cvsblame.cgi?file=ns/cmd/xfe/dialogs.c#2384</A>
</TD></TR></TABLE>
<P>
You may also begin a query with the <A HREF="cvsqueryform.cgi">CVS Query Form</A>.
<FORM METHOD='POST' ACTION='cvsblame.cgi'>
<TABLE CELLPADDING=0 CELLSPACING=0>
<TR>
<TD>*<SPACER TYPE=HORIZONTAL SIZE=6></TD>
<TD>Instead of the <i>line_nums</i> parameter, you can
<INPUT TYPE=submit value='set a cookie to turn $new_linenum'>
line numbers.</TD>
</TR><TR>
<TD></TD>
<TD>$linenum_message</TD>
</TR></TABLE>
<INPUT TYPE=hidden NAME='set_line' value='$new_linenum'>
</FORM>
__USAGE__
    &print_bottom;
} # sub print_usage
# Print the shared page footer: a horizontal rule, a link back to the usage
# page, and a feedback mailto for the configured maintainer.
sub print_bottom {
    my $contact = Param('maintainer');
    my $footer = <<__FOOTER__;
<HR WIDTH="100%">
<FONT SIZE=-1>
<A HREF="cvsblame.cgi">Page configuration and help</A>.
Mail feedback to <A HREF="mailto:$contact?subject=About the cvsblame script">&lt;$contact&gt;</A>.
</FONT></BODY>
</HTML>
__FOOTER__
    print $footer;
} # print_bottom
# Emit a machine-readable dump of the blame data inside <PRE>: first a
# run-length encoding of @::revision_map ("revision:count" per run of
# consecutive identical revisions), then one "rev|ctime|author|log." detail
# line per distinct revision.
sub print_raw_data {
    my %revs_seen = ();
    print "<PRE>\n";
    if (@::revision_map) {
        my $prev_rev = $::revision_map[0];
        # Bug fix: the first revision in the map was never recorded in
        # %revs_seen (only revisions at a run boundary were), so it was
        # missing from the REVISION DETAILS section below.
        $revs_seen{$prev_rev} = 1;
        my $count = 0;
        for my $rev (@::revision_map) {
            if ($prev_rev eq $rev) {
                $count++;
            } else {
                print "$prev_rev:$count\n";
                $count = 1;
                $prev_rev = $rev;
                $revs_seen{$rev} = 1;
            }
        }
        # Flush the final run.
        print "$prev_rev:$count\n";
    }
    print "REVISION DETAILS\n";
    for my $rev (sort keys %revs_seen) {
        print "$rev|$::revision_ctime{$rev}|$::revision_author{$rev}|$::revision_log{$rev}.\n";
    }
    print "</PRE>\n";
}
# Turn a C '#include "foo.h"' occurrence in a source line into a hyperlink
# to a blame page for the included header, if the header's RCS archive can
# be found under one of a few candidate directories.  Relies on file-level
# state: $rcs_path, $root, $browse_revtag, $use_html.
sub link_includes {
    my ($text) = $_[0];
    if ($text =~ /\#(\s*)include(\s*)"(.*?)"/) {
        # Candidate locations for the included file's ",v" archive, in
        # preference order ('ns/include' is Mozilla-tree specific).
        foreach my $trial_root (($rcs_path, 'ns/include',
                                 "$rcs_path/Attic", "$rcs_path/..")) {
            # $` / $' are the pre-/post-match parts of the include regex
            # above; the -r file test does not run a regex, so $1..$3 and
            # $`/$' are still valid here.
            # NOTE(review): the rebuilt text appends a literal ';' after the
            # closing quote (the \"; below) that was not in the original
            # line — looks like a typo; confirm before changing output.
            if (-r "$root/$trial_root/$3,v") {
                $text = "$`#$1include$2\"<A HREF='cvsblame.cgi"
                    ."?root=$root&file=$trial_root/$3&rev=".$browse_revtag
                    ."&use_html=$use_html'>$3</A>\";$'";
                last;
            }
        }
    }
    return $text;
}
# Shared state for the C-comment highlighting pass (html_comments_init /
# leave_html_comments): whether we are currently inside a /* ... */ comment,
# the regex fragments for the two delimiters, and the delimiter expected next.
my $in_comments = 0;
my $open_delim;
my $close_delim;
my $expected_delim;
# Reset the C-comment scanner state.  Returns 1 when HTML markup inside
# comments is enabled ($use_html), 0 otherwise (state left untouched).
sub html_comments_init {
    unless ($use_html) {
        return 0;
    }
    # Start outside any comment; delimiters are regex-escaped "/*" and "*/".
    ($in_comments, $open_delim, $close_delim) = (0, '\/\*', '\*\/');
    # The first delimiter we should see is an opener.
    $expected_delim = $open_delim;
    return 1;
}
# HTML-escape the non-comment parts of a source line while leaving the
# interior of C /* ... */ comments untouched, so authors may embed HTML in
# comments.  Comment state persists across calls via $in_comments and
# $expected_delim (see html_comments_init).  Finally, rewrites a raw
# "<user@host>" occurrence into a mailto: link.
sub leave_html_comments {
    my ($text) = $_[0];
    my $newtext = "";
    my $oldtext = $text;
    # Consume the line delimiter by delimiter, toggling the comment state.
    while ($oldtext =~ /(.*$expected_delim)(.*\n)/) {
        # Bug fix: this used the package globals $a and $b as scratch
        # variables; those are perl's sort() comparison variables and must
        # not be clobbered here.  Use lexicals instead.
        my $before = $1;
        my $after = $2;
        # pay no attention to C++ comments within C comment context
        if ($in_comments == 0) {
            # Outside a comment: escape markup, then expect the closer next.
            $before =~ s/</&lt;/g;
            $before =~ s/>/&gt;/g;
            $expected_delim = $close_delim;
            $in_comments = 1;
        }
        else {
            $expected_delim = $open_delim;
            $in_comments = 0;
        }
        $newtext = $newtext . $before;
        $oldtext = $after;
    }
    # Handle the remainder after the last delimiter on the line.
    if ($in_comments == 0){
        $oldtext =~ s/</&lt;/g;
        $oldtext =~ s/>/&gt;/g;
    }
    $text = $newtext . $oldtext;
    # Now fix the breakage of <username> stuff on xfe. -byrd
    if ($text =~ /(.*)<(.*@.*)>(.*\n)/) {
        $text = $1 . "<A HREF=mailto:$2?subject=$url_filename>$2</A>" . $3;
    }
    return $text;
}

View File

@@ -1,906 +0,0 @@
#!/usr/bin/perl --
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
##############################################################################
#
# cvsblame.pl - Shamelessly adapted from Scott Furman's cvsblame script
# by Steve Lamm (slamm@netscape.com)
# - Annotate each line of a CVS file with its author,
# revision #, date, etc.
#
# Report problems to Steve Lamm (slamm@netscape.com)
#
##############################################################################
use strict;
# Shut up misguided -w warnings about "used only once". "use vars" just
# doesn't work for me.
# Touch package globals that are otherwise "used only once" so that -w
# stays quiet ("use vars" did not work here, per the comment above).
# The touch order matches the original, so the implicit return value
# (the last assignment) is unchanged.
sub cvsblame_pl_sillyness {
    my $unused;
    $unused = $::file_description;
    $unused = $::opt_A;
    $unused = $::opt_d;
    $unused = $::principal_branch;
    $unused = %::lines_added;
    $unused = %::lines_removed;
}
use Time::Local qw(timegm); # timestamps
use Date::Format; # human-readable dates
use File::Basename; # splits up paths into path, filename, suffix
# Debug verbosity: 0 = silent; >=2 traces RCS parsing, >=3 dumps parsed data.
my $debug = 0;
# -m <days>: only annotate lines modified within the last <days>; default off.
$::opt_m = 0 unless (defined($::opt_m));
# Extract base part of this script's name
# NOTE(review): this only *matches* the basename into $1 — $::progname keeps
# the full $0 path; a s@.*/@@ was probably intended.  Confirm before changing.
($::progname = $0) =~ /([^\/]+)$/;
&cvsblame_init;
# Lexical state shared with cvsblame_init / parse_rcs_tree; assigned there.
my $SECS_PER_DAY;
my $time;
# Initialize annotation defaults and time bookkeeping.  With no formatting
# flags supplied, fall back to author+revision annotations (-a -v).  Also
# computes $::opt_m_timestamp, the cutoff time for the -m option.
sub cvsblame_init {
    # Use default formatting options if none supplied.
    unless ($::opt_A || $::opt_a || $::opt_d || $::opt_v) {
        $::opt_a = 1;
        $::opt_v = 1;
    }
    $time = time;
    $SECS_PER_DAY = 24 * 60 * 60;
    # Timestamp threshold at which annotations begin to occur (-m option).
    $::opt_m_timestamp = defined $::opt_m
        ? $time - $::opt_m * $SECS_PER_DAY
        : $time;
}
# Generic traversal of a CVS tree. Invoke callback function for
# individual directories that contain CVS files.
# Recursively walk a CVS working tree rooted at $dir, invoking *callback on
# every directory that is under CVS control (i.e. has a CVS/ subdirectory).
# $nlink is the directory's hard-link count, used as the classic find(1)
# optimization: a directory whose nlink is 2 has no subdirectories.
sub traverse_cvs_tree {
    my ($dir, $nlink);
    local *callback;    # glob-passed callback; local so recursion restores it
    ($dir, *callback, $nlink) = @_;
    my ($dev, $ino, $mode, $subcount);
    # Get $nlink for top-level directory
    ($dev, $ino, $mode, $nlink) = stat($dir) unless $nlink;
    # Read directory
    opendir(DIR, $dir) || die "Can't open $dir\n";
    my (@filenames) = readdir(DIR);
    closedir(DIR);
    return if ! -d "$dir/CVS";
    &callback($dir);
    # This dir has subdirs
    if ($nlink != 2) {
        $subcount = $nlink - 2; # Number of subdirectories
        for (@filenames) {
            # Stop early once all known subdirectories have been visited.
            last if $subcount == 0;
            next if $_ eq '.';
            next if $_ eq '..';
            next if $_ eq 'CVS';
            my $name = "$dir/$_";
            ($dev, $ino, $mode, $nlink) = lstat($name);
            next unless -d _;    # '_' reuses the lstat() result above
            if (-x _ && -r _) {
                print STDERR "$::progname: Entering $name\n";
                &traverse_cvs_tree($name, *callback, $nlink);
            } else {
                warn("Couldn't chdir to $name");
            }
            --$subcount;
        }
    }
}
# Consume one token from the already opened RCSFILE filehandle.
# Unescape string tokens, if necessary.
# A token is either a run of non-whitespace characters, a lone ';', or an
# RCS @-quoted string (returned with its "@@" escapes collapsed to "@").
# Buffer holding the unconsumed remainder of the current RCSFILE line.
my $line_buffer;
sub get_token {
    # Erase all-whitespace lines.
    $line_buffer = '' unless (defined($line_buffer));
    while ($line_buffer =~ /^$/) {
        die ("Unexpected EOF") if eof(RCSFILE);
        $line_buffer = <RCSFILE>;
        $line_buffer =~ s/^\s+//; # Erase leading whitespace
    }
    # A string of non-whitespace characters is a token ...
    return $1 if ($line_buffer =~ s/^([^;@][^;\s]*)\s*//o);
    # ...and so is a single semicolon ...
    return ';' if ($line_buffer =~ s/^;\s*//o);
    # ...or an RCS-encoded string that starts with an @ character.
    $line_buffer =~ s/^@([^@]*)//o;
    my $token = $1;
    # Detect single @ character used to close RCS-encoded string.
    # (A doubled "@@" is an escaped literal and must not terminate it.)
    while ($line_buffer !~ /@/o || # Short-circuit optimization
           $line_buffer !~ /([^@]|^)@([^@]|$)/o) {
        $token .= $line_buffer;
        die ("Unexpected EOF") if eof(RCSFILE);
        $line_buffer = <RCSFILE>;
    }
    # Retain the remainder of the line after the terminating @ character.
    my $i = rindex($line_buffer, '@');
    $token .= substr($line_buffer, 0, $i);
    $line_buffer = substr($line_buffer, $i + 1);
    # Undo escape-coding of @ characters.
    $token =~ s/@@/@/og;
    # Digest any extra blank lines.
    while (($line_buffer =~ /^$/) && !eof(RCSFILE)) {
        $line_buffer = <RCSFILE>;
    }
    return $token;
}
# Path of the RCS file currently being parsed (set by parse_cvs_file);
# interpolated into match_token's parse-error messages.
my $rcs_pathname;
# Consume the next token from the RCS filehandle and die with a parse error
# naming $rcs_pathname unless it equals the expected string constant.
sub match_token {
    my ($expected) = @_;
    my $seen = &get_token;
    if ($seen ne $expected) {
        die "Unexpected parsing error in RCS file $rcs_pathname.\n",
            "Expected token: $expected, but saw: $seen\n";
    }
}
# Push an RCS token back so the next get_token call returns it first.
sub unget_token {
    my ($token) = @_;
    $line_buffer = "$token $line_buffer";
}
# Parses "administrative" header of RCS files, setting these globals:
#
# $::head_revision -- Revision for which cleartext is stored
# $::principal_branch
# $::file_description
# %::revision_symbolic_name -- maps from numerical revision # to symbolic tag
# %::tag_revision -- maps from symbolic tag to numerical revision #
#
# Parse the RCS administrative header (see the comment block above for the
# globals populated).  Stops — pushing the token back — at the first token
# beginning with a digit, which is the first revision of the tree section.
sub parse_rcs_admin {
    my ($token, $tag, $tag_name, $tag_revision);
    # Undefine variables, because we may have already read another RCS file
    undef %::tag_revision;
    undef %::revision_symbolic_name;
    while (1) {
        # Read initial token at beginning of line
        $token = &get_token();
        # We're done once we reach the description of the RCS tree
        if ($token =~ /^\d/o) {
            &unget_token($token);
            return;
        }
        # print "token: $token\n";
        if ($token eq "head") {
            $::head_revision = &get_token;
            &get_token; # Eat semicolon
        } elsif ($token eq "branch") {
            $::principal_branch = &get_token;
            &get_token; # Eat semicolon
        } elsif ($token eq "symbols") {
            # Create an associate array that maps from tag name to
            # revision number and vice-versa.
            while (($tag = &get_token) ne ';') {
                ($tag_name, $tag_revision) = split(':', $tag);
                $::tag_revision{$tag_name} = $tag_revision;
                $::revision_symbolic_name{$tag_revision} = $tag_name;
            }
        } elsif ($token eq "comment") {
            $::file_description = &get_token;
            &get_token; # Eat semicolon
        # Ignore all these other fields - We don't care about them.
        } elsif (($token eq "locks") ||
                 ($token eq "strict") ||
                 ($token eq "expand") ||
                 ($token eq "access")) {
            (1) while (&get_token ne ';');
        } else {
            warn ("Unexpected RCS token: $token\n");
        }
    }
    die "Unexpected EOF";
}
# Construct associative arrays that represent the topology of the RCS tree
# and other arrays that contain info about individual revisions.
#
# The following associative arrays are created, keyed by revision number:
# %::revision_date -- e.g. "96.02.23.00.21.52"
# %::timestamp -- seconds since 12:00 AM, Jan 1, 1970 GMT
# %::revision_author -- e.g. "tom"
# %::revision_branches -- descendant branch revisions, separated by spaces,
# e.g. "1.21.4.1 1.21.2.6.1"
# %::prev_revision -- revision number of previous *ancestor* in RCS tree.
# Traversal of this array occurs in the direction
# of the primordial (1.1) revision.
# %::prev_delta -- revision number of previous revision which forms
# the basis for the edit commands in this revision.
# This causes the tree to be traversed towards the
# trunk when on a branch, and towards the latest trunk
# revision when on the trunk.
# %::next_delta -- revision number of next "delta". Inverts %::prev_delta.
#
# Also creates %::last_revision, keyed by a branch revision number, which
# indicates the latest revision on a given branch,
# e.g. $::last_revision{"1.2.8"} == 1.2.8.5
#
# Per-revision age in (fractional) days relative to script start time.
my %revision_age;
# Parse the RCS tree section, populating the topology arrays documented in
# the comment block above.  Stops — pushing the token back — at 'desc'.
sub parse_rcs_tree {
    my ($revision, $date, $author, $branches, $next);
    my ($branch, $is_trunk_revision);
    # Undefine variables, because we may have already read another RCS file
    undef %::timestamp;
    undef %revision_age;
    undef %::revision_author;
    undef %::revision_branches;
    undef %::revision_ctime;
    undef %::revision_date;
    undef %::prev_revision;
    undef %::prev_delta;
    undef %::next_delta;
    undef %::last_revision;
    # until we use commitid
    my $commitid;
    while (1) {
        $revision = &get_token;
        # End of RCS tree description ?
        if ($revision eq 'desc') {
            &unget_token($revision);
            return;
        }
        # Trunk revisions have exactly two numeric components.
        $is_trunk_revision = ($revision =~ /^[0-9]+\.[0-9]+$/);
        $::tag_revision{$revision} = $revision;
        ($branch) = $revision =~ /(.*)\.[0-9]+/o;
        $::last_revision{$branch} = $revision;
        # Parse date
        &match_token('date');
        $date = &get_token;
        $::revision_date{$revision} = $date;
        &match_token(';');
        # Convert date into timestamp
        my @date_fields = reverse(split(/\./, $date));
        $date_fields[4]--; # Month ranges from 0-11, not 1-12
        $::timestamp{$revision} = timegm(@date_fields);
        # Pretty print the date string
        my @ltime = localtime($::timestamp{$revision});
        my $formated_date = strftime("%Y-%m-%d %H:%M", @ltime);
        $::revision_ctime{$revision} = $formated_date;
        # Save age
        $revision_age{$revision} =
            ($time - $::timestamp{$revision}) / $SECS_PER_DAY;
        # Parse author
        &match_token('author');
        $author = &get_token;
        $::revision_author{$revision} = $author;
        &match_token(';');
        # Parse state;
        &match_token('state');
        while (&get_token ne ';') { }
        # Parse branches
        &match_token('branches');
        $branches = '';
        my $token;
        while (($token = &get_token) ne ';') {
            $::prev_revision{$token} = $revision;
            $::prev_delta{$token} = $revision;
            $branches .= "$token ";
        }
        $::revision_branches{$revision} = $branches;
        # Parse revision of next delta in chain
        &match_token('next');
        $next = '';
        if (($token = &get_token) ne ';') {
            $next = $token;
            &get_token; # Eat semicolon
            $::next_delta{$revision} = $next;
            $::prev_delta{$next} = $revision;
            # 'next' points toward 1.1 on the trunk but away from it on a
            # branch, so the ancestor relation is inverted between the cases.
            if ($is_trunk_revision) {
                $::prev_revision{$revision} = $next;
            } else {
                $::prev_revision{$next} = $revision;
            }
        }
        # Newer RCS files may carry a commitid field; read and ignore it.
        if (($token = &get_token) eq 'commitid') {
            $commitid = &get_token;
            &match_token(';');
        } else {
            &unget_token($token);
        }
        if ($debug >= 3) {
            print "<pre>revision = $revision\n";
            print "date = $date\n";
            print "author = $author\n";
            print "branches = $branches\n";
            print "next = $next\n";
            print "commitid = $commitid\n" if defined $commitid;
            print "</pre>\n\n";
        }
    }
}
# Consume the RCS "desc" section; the description text itself is read but
# not used by cvsblame.
sub parse_rcs_description {
    &match_token('desc');
    my $ignored_description = &get_token;
}
# Construct associative arrays containing info about individual revisions.
#
# The following associative arrays are created, keyed by revision number:
# %::revision_log -- log message
# %::revision_deltatext -- Either the complete text of the revision,
# in the case of the head revision, or the
# encoded delta between this revision and another.
# The delta is either with respect to the successor
# revision if this revision is on the trunk or
# relative to its immediate predecessor if this
# revision is on a branch.
# Read every (revision, log, text) triple up to EOF, filling %::revision_log
# and %::revision_deltatext (head cleartext or per-revision delta script;
# see the comment block above).
sub parse_rcs_deltatext {
    undef %::revision_log;
    undef %::revision_deltatext;
    until (eof(RCSFILE)) {
        my $revision = &get_token;
        print "Reading delta for revision: $revision\n" if ($debug >= 3);
        &match_token('log');
        $::revision_log{$revision} = &get_token;
        &match_token('text');
        $::revision_deltatext{$revision} = &get_token;
    }
}
# Reads and parses complete RCS file from already-opened RCSFILE descriptor.
# Or if a parameter is given use the corresponding file
# Read and parse a complete RCS file from the already-opened RCSFILE
# handle, or — when $path is given — open (and later close) it here.
sub parse_rcs_file {
    my $path = shift;
    if (defined $path) {
        # Fixed: this was an unchecked two-arg open.  The unchecked form let
        # a failed open surface later as a baffling "Unexpected EOF" from
        # the token reader, and two-arg open honors mode characters
        # embedded in $path.
        open(RCSFILE, '<', $path)
            or die "$::progname: can't open RCS file $path: $!\n";
    }
    print "Reading RCS admin...\n" if ($debug >= 2);
    &parse_rcs_admin();
    print "Reading RCS revision tree topology...\n" if ($debug >= 2);
    &parse_rcs_tree();
    if( $debug >= 3 ){
        print "<pre>Keys:\n\n";
        for my $i (keys %::tag_revision ){
            my $k = $::tag_revision{$i};
            print "yoyuo $i: $k\n";
        }
        print "</pre>\n";
    }
    &parse_rcs_description();
    print "Reading RCS revision deltas...\n" if ($debug >= 2);
    &parse_rcs_deltatext();
    print "Done reading RCS file...\n" if ($debug >= 2);
    close RCSFILE if (defined $path);
}
# Map a tag to a numerical revision number. The tag can be a symbolic
# branch tag, a symbolic revision tag, or an ordinary numerical
# revision number.
# Resolve a symbolic branch tag, symbolic revision tag, or literal revision
# number to a concrete revision via %::tag_revision / %::last_revision.
sub map_tag_to_revision {
    my ($tag_or_revision) = @_;
    my $revision = $::tag_revision{$tag_or_revision};
    # Branch tags carry a magic ".0." component: xxx.yyy.0.zzz names the
    # branch xxx.yyy.zzz rather than a single revision.
    if ($revision =~ /(.*)\.0\.([0-9]+)/) {
        my $branch = "$1.$2";
        # Latest revision on the branch, if the branch has any...
        return $::last_revision{$branch} if defined $::last_revision{$branch};
        # ...otherwise fall back to the branch point itself.
        return $1;
    }
    return $revision;
}
# Construct an ordered list of ancestor revisions to the given
# revision, starting with the immediate ancestor and going back
# to the primordial revision (1.1).
#
# Note: The generated path does not traverse the tree the same way
# that the individual revision deltas do. In particular,
# the path traverses the tree "backwards" on branches.
# Walk %::prev_revision from the given revision back toward 1.1, returning
# the ancestors with the immediate ancestor first (see comment above for
# how this differs from delta order).
sub ancestor_revisions {
    my ($revision) = @_;
    my @ancestors;
    for (my $rev = $::prev_revision{$revision}; $rev; $rev = $::prev_revision{$rev}) {
        push @ancestors, $rev;
    }
    return @ancestors;
}
# Extract the given revision from the digested RCS file.
# (Essentially the equivalent of cvs up -rXXX)
# Reconstruct the full text of $revision (the equivalent of "cvs up -rXXX")
# by starting from the head revision's stored cleartext and applying the
# RCS delta scripts along the %::prev_delta chain.  Side effect: folds
# per-revision change counts into %::lines_added / %::lines_removed.
sub extract_revision {
    my ($revision) = @_;
    my (@path);
    my $add_lines_remaining = 0;
    my ($start_line, $count);
    # Compute path through tree of revision deltas to most recent trunk revision
    while ($revision) {
        push(@path, $revision);
        $revision = $::prev_delta{$revision};
    }
    @path = reverse(@path);
    shift @path; # Get rid of head revision
    # Get complete contents of head revision
    my (@text) = split(/^/, $::revision_deltatext{$::head_revision});
    # Iterate, applying deltas to previous revision
    foreach $revision (@path) {
        my $adjust = 0;   # cumulative line-number drift from earlier edits
        my @diffs = split(/^/, $::revision_deltatext{$revision});
        my ($lines_added) = 0;
        my ($lines_removed) = 0;
        foreach my $command (@diffs) {
            if ($add_lines_remaining > 0) {
                # Insertion lines from a prior "a" command.
                splice(@text, $start_line + $adjust,
                       0, $command);
                $add_lines_remaining--;
                $adjust++;
            } elsif ($command =~ /^d(\d+)\s(\d+)/) {
                # "d" - Delete command
                ($start_line, $count) = ($1, $2);
                splice(@text, $start_line + $adjust - 1, $count);
                $adjust -= $count;
                $lines_removed += $count;
            } elsif ($command =~ /^a(\d+)\s(\d+)/) {
                # "a" - Add command
                ($start_line, $count) = ($1, $2);
                $add_lines_remaining = $count;
                # Bug fix: this read "$lines_added += $lines_added", which
                # left the added-line count permanently at zero.
                $lines_added += $count;
            } else {
                die "Error parsing diff commands";
            }
        }
        $::lines_removed{$revision} += $lines_removed;
        $::lines_added{$revision} += $lines_added;
    }
    return @text;
}
# Parse the RCS archive for $rcs_pathname (falling back to the Attic/ for
# files that exist only on a branch) and build @::revision_map: one element
# per line of the requested revision, holding the revision number in which
# that line was introduced.  Returns the resolved revision number, or ''
# when -m is set and the file was not modified recently enough.
sub parse_cvs_file {
    ($rcs_pathname) = @_;
    # Args in: $::opt_rev - requested revision
    # $::opt_m - time since modified
    # Args out: @::revision_map
    # %::timestamp
    # (%::revision_deltatext)
    my @diffs;
    my $revision;
    my $skip;
    my ($start_line, $count);
    my @temp;
    @::revision_map = ();
    CheckHidden($rcs_pathname);
    if (!open(RCSFILE, $rcs_pathname)) {
        # Not in the live repository; try the Attic (deleted/branch-only files).
        my ($name, $path, $suffix) = fileparse($rcs_pathname);
        my $deleted_pathname = "${path}Attic/$name$suffix";
        if (!open(RCSFILE, $deleted_pathname)) {
            print STDERR "$::progname: This file appeared to be " .
                "under CVS control, but the RCS file is inaccessible.\n";
            print STDERR "(Couldn't open '" . shell_escape($rcs_pathname) .
                "' or '" . shell_escape($deleted_pathname) . "').\n";
            die "CVS file is inaccessible.\n";
        }
    }
    &parse_rcs_file();
    close(RCSFILE);
    if (!defined($::opt_rev) || $::opt_rev eq '' || $::opt_rev eq 'HEAD') {
        # Explicitly specified topmost revision in tree
        $revision = $::head_revision;
    } else {
        # Symbolic tag or specific revision number specified.
        $revision = &map_tag_to_revision($::opt_rev);
        die "$::progname: error: -r: No such revision: $::opt_rev\n"
            if ($revision eq '');
    }
    # The primordial revision is not always 1.1! Go find it.
    my $primordial = $revision;
    while (exists($::prev_revision{$primordial}) &&
           $::prev_revision{$primordial} ne "") {
        $primordial = $::prev_revision{$primordial};
    }
    # Don't display file at all, if -m option is specified and no
    # changes have been made in the specified file.
    if ($::opt_m && $::timestamp{$revision} < $::opt_m_timestamp) {
        return '';
    }
    # Figure out how many lines were in the primordial, i.e. version 1.1,
    # check-in by moving backward in time from the head revision to the
    # first revision.
    my $line_count = 0;
    if (exists ($::revision_deltatext{$::head_revision}) &&
        $::revision_deltatext{$::head_revision}) {
        my @tmp_array = split(/^/, $::revision_deltatext{$::head_revision});
        $line_count = @tmp_array;
    }
    # NOTE(review): $skip is a fresh lexical here, so this defined() guard
    # always fires; harmless, but the check looks vestigial.
    $skip = 0 unless (defined($skip));
    my $rev;
    for ($rev = $::prev_revision{$::head_revision}; $rev;
         $rev = $::prev_revision{$rev}) {
        @diffs = split(/^/, $::revision_deltatext{$rev});
        foreach my $command (@diffs) {
            if ($skip > 0) {
                # Skip insertion lines from a prior "a" command.
                $skip--;
            } elsif ($command =~ /^d(\d+)\s(\d+)/) {
                # "d" - Delete command
                ($start_line, $count) = ($1, $2);
                $line_count -= $count;
            } elsif ($command =~ /^a(\d+)\s(\d+)/) {
                # "a" - Add command
                ($start_line, $count) = ($1, $2);
                $skip = $count;
                $line_count += $count;
            } else {
                die "$::progname: error: illegal RCS file $rcs_pathname\n",
                    " error appears in revision $rev\n";
            }
        }
    }
    # Now, play the delta edit commands *backwards* from the primordial
    # revision forward, but rather than applying the deltas to the text of
    # each revision, apply the changes to an array of revision numbers.
    # This creates a "revision map" -- an array where each element
    # represents a line of text in the given revision but contains only
    # the revision number in which the line was introduced rather than
    # the line text itself.
    #
    # Note: These are backward deltas for revisions on the trunk and
    # forward deltas for branch revisions.
    # Create initial revision map for primordial version.
    while ($line_count--) {
        push(@::revision_map, $primordial);
    }
    my @ancestors = &ancestor_revisions($revision);
    unshift (@ancestors, $revision); #
    pop @ancestors; # Remove "1.1"
    $::last_revision = $primordial;
    foreach $revision (reverse @ancestors) {
        my $is_trunk_revision = ($revision =~ /^[0-9]+\.[0-9]+$/);
        if ($is_trunk_revision) {
            @diffs = split(/^/, $::revision_deltatext{$::last_revision});
            # Revisions on the trunk specify deltas that transform a
            # revision into an earlier revision, so invert the translation
            # of the 'diff' commands.
            foreach my $command (@diffs) {
                if ($skip > 0) {
                    $skip--;
                } else {
                    if ($command =~ /^d(\d+)\s(\d+)$/) { # Delete command
                        # Inverted: a delete in the backward delta means
                        # these lines first appeared in $revision.
                        ($start_line, $count) = ($1, $2);
                        $#temp = -1;
                        while ($count--) {
                            push(@temp, $revision);
                        }
                        splice(@::revision_map, $start_line - 1, 0, @temp);
                    } elsif ($command =~ /^a(\d+)\s(\d+)$/) { # Add command
                        # Inverted: an add in the backward delta means these
                        # map entries do not exist in $revision — drop them.
                        ($start_line, $count) = ($1, $2);
                        splice(@::revision_map, $start_line, $count);
                        $skip = $count;
                    } else {
                        die "Error parsing diff commands";
                    }
                }
            }
        } else {
            # Revisions on a branch are arranged backwards from those on
            # the trunk. They specify deltas that transform a revision
            # into a later revision.
            my $adjust = 0;
            @diffs = split(/^/, $::revision_deltatext{$revision});
            foreach my $command (@diffs) {
                if ($skip > 0) {
                    $skip--;
                } else {
                    if ($command =~ /^d(\d+)\s(\d+)$/) { # Delete command
                        ($start_line, $count) = ($1, $2);
                        splice(@::revision_map, $start_line + $adjust - 1, $count);
                        $adjust -= $count;
                    } elsif ($command =~ /^a(\d+)\s(\d+)$/) { # Add command
                        ($start_line, $count) = ($1, $2);
                        $skip = $count;
                        $#temp = -1;
                        while ($count--) {
                            push(@temp, $revision);
                        }
                        splice(@::revision_map, $start_line + $adjust, 0, @temp);
                        $adjust += $skip;
                    } else {
                        die "Error parsing diff commands";
                    }
                }
            }
        }
        $::last_revision = $revision;
    }
    return $revision;
}
1;
__END__
#
# The following are parts of the original cvsblame script that are not
# used for cvsblame.pl
#
# Read CVS/Entries and CVS/Repository files.
#
# Creates these associative arrays, keyed by the CVS file pathname
#
# %cvs_revision -- Revision # present in working directory
# %cvs_date
# %cvs_sticky_revision -- Sticky tag, if any
#
# Also, creates %cvs_files, keyed by the directory path, which contains
# a space-separated list of the files under CVS control in the directory
sub read_cvs_entries
{
my ($directory) = @_;
my ($filename, $rev, $date, $idunno, $sticky, $pathname);
$cvsdir = $directory . '/CVS';
CheckHidden($cvsdir);
return if (! -d $cvsdir);
return if !open(ENTRIES, "$cvsdir/Entries");
while(<ENTRIES>) {
chop;
($filename, $rev, $date, $idunno, $sticky) = split("/", substr($_, 1));
($pathname) = $directory . "/" . $filename;
$cvs_revision{$pathname} = $rev;
$cvs_date{$pathname} = $date;
$cvs_sticky_revision{$pathname} = $sticky;
$cvs_files{$directory} .= "$filename\\";
}
close(ENTRIES);
return if !open(REPOSITORY, "$cvsdir/Repository");
$repository = <REPOSITORY>;
chop($repository);
close(REPOSITORY);
$repository{$directory} = $repository;
}
# Given path to file in CVS working directory, compute path to RCS
# repository file. Cache that info for future use.
sub rcs_pathname {
($pathname) = @_;
if ($pathname =~ m@/@) {
($directory,$filename) = $pathname =~ m@(.*)/([^/]+)$@;
} else {
($directory,$filename) = ('.',$pathname);
$pathname = "./" . $pathname;
}
if (!defined($repository{$directory})) {
&read_cvs_entries($directory);
}
if (!defined($cvs_revision{$pathname})) {
die "$::progname: error: File '$pathname' does not appear to be under" .
" CVS control.\n"
}
print STDERR "file: $filename\n" if $debug;
my ($rcs_path) = $repository{$directory} . '/' . $filename . ',v';
return $rcs_path if (-r $rcs_path);
# A file that exists only on the branch, not on the trunk, is found
# in the Attic subdir.
return $repository{$directory} . '/Attic/' . $filename . ',v';
}
sub show_annotated_cvs_file {
# Build (and return) an annotated listing of one CVS-controlled file.
# Each output line is prefixed with the annotations selected by the global
# option flags: author (-a), revision (-v), age in days (-A), date (-d),
# line number (-l).  Relies on the per-revision tables populated by
# &parse_cvs_file / &extract_revision (@::revision_map, %::revision_author,
# %::revision_ctime, %::timestamp, ...).
# Returns: the list of annotated output chunks (@output).
my ($pathname) = @_;
my (@output) = ();
# parse_cvs_file returns the revision to display and fills in the
# per-revision global tables used below.
my $revision = &parse_cvs_file($pathname);
# NOTE(review): @text, $annotation_width, $blank_annotation, $line, $text
# and $annotation are package globals (no 'my'); verify no caller depends
# on them surviving this sub.
@text = &extract_revision($revision);
# Sanity check: exactly one revision-map entry per text line.
die "$::progname: Internal consistency error" if ($#text != $#::revision_map);
# Set total width of line annotation.
# Warning: field widths here must match format strings below.
$annotation_width = 0;
$annotation_width += 8 if $::opt_a; # author
$annotation_width += 7 if $::opt_v; # revision
$annotation_width += 6 if $::opt_A; # age
$annotation_width += 12 if $::opt_d; # date
$blank_annotation = ' ' x $annotation_width;
# With several files on the command line, print a separator banner first.
if ($multiple_files_on_command_line) {
print "\n", "=" x (83 + $annotation_width);
print "\n$::progname: Listing file: $pathname\n"
}
# Print each line of the revision, preceded by its annotation.
$line = 0;
foreach $revision (@::revision_map) {
$text = $text[$line++];
$annotation = '';
# Annotate with revision author
$annotation .= sprintf("%-8s", $::revision_author{$revision}) if $::opt_a;
# Annotate with revision number
$annotation .= sprintf(" %-6s", $revision) if $::opt_v;
# Date annotation
$annotation .= " $::revision_ctime{$revision}" if $::opt_d;
# Age annotation ?
$annotation .= sprintf(" (%3s)",
int($revision_age{$revision})) if $::opt_A;
# -m (if-modified-since) annotation: blank out lines older than the cutoff.
if ($::opt_m && ($::timestamp{$revision} < $::opt_m_timestamp)) {
$annotation = $blank_annotation;
}
# Suppress annotation of whitespace lines, if requested;
$annotation = $blank_annotation if $::opt_w && ($text =~ /^\s*$/);
# printf "%4d ", $line if $::opt_l;
# print "$annotation - $text";
push(@output, sprintf("%4d ", $line)) if $::opt_l;
push(@output, "$annotation - $text");
}
# Implicit return of the accumulated listing.
@output;
}
sub usage {
    # Print the command-line help text and terminate via die().
    # die() concatenates its arguments, so building one string up front
    # produces exactly the same message as the original argument list.
    my $help =
        "$::progname: usage: [options] [file|dir]...\n"
      . " Options:\n"
      . " -r <revision> Specify CVS revision of file to display\n"
      . " <revision> can be any of:\n"
      . " + numeric tag, e.g. 1.23,\n"
      . " + symbolic branch or revision tag, e.g. CHEDDAR,\n"
      . " + HEAD keyword (most recent revision on trunk)\n"
      . " -a Annotate lines with author (username)\n"
      . " -A Annotate lines with age, in days\n"
      . " -v Annotate lines with revision number\n"
      . " -d Annotate lines with date, in local time zone\n"
      . " -l Annotate lines with line number\n"
      . " -w Don't annotate all-whitespace lines\n"
      . " -m <# days> Only annotate lines modified within last <# days>\n"
      . " -h Print help (this message)\n\n"
      . " (-a -v assumed, if none of -a, -v, -A, -d supplied)\n";
    die $help;
}
# Parse command-line switches; an invalid switch or -h shows the help text
# and exits (usage() dies).  Option values land in $::opt_* via Getopts.
&usage if (!&Getopts('r:m:Aadhlvw'));
&usage if ($::opt_h); # help option
# More than one path argument: show_annotated_cvs_file prints a banner
# before each file when this flag is set.
$multiple_files_on_command_line = 1 if ($#ARGV != 0);
&cvsblame_init;
sub annotate_cvs_directory
{
    # Annotate every CVS-controlled file recorded for a single directory.
    # Used as the per-directory callback for &traverse_cvs_tree.
    my $dir = shift;
    &read_cvs_entries($dir);
    # $cvs_files{$dir} holds the directory's file names joined by backslashes.
    for my $entry (split(/\\/, $cvs_files{$dir})) {
        &show_annotated_cvs_file("$dir/$entry");
    }
}
# No files on command-line ? Use current directory.
push(@ARGV, '.') if ($#ARGV == -1);
# Iterate over files/directories on command-line
while ($#ARGV >= 0) {
$pathname = shift @ARGV;
# Is it a directory ?
if (-d $pathname) {
# Walk the CVS tree, invoking annotate_cvs_directory (passed as a glob)
# once per directory encountered.
&traverse_cvs_tree($pathname, *annotate_cvs_directory);
# No, it must be a file.
} else {
&show_annotated_cvs_file($pathname);
}
}
# True value so this file can also be require'd as a library.
1;

View File

@@ -1,203 +0,0 @@
#!/usr/bin/perl -w
# cvsgraph.cgi -- a graph of all branches, tags, etc.
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Mozilla Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Jacob Steenhagen
#
# Contributor(s): Jacob Steenhagen <jake@acutex.net>
use strict;
use vars qw{ $revision_ctime $revision_author };
require 'CGI.pl';
# This cgi doesn't actually generate a graph. It relies on the cvsgraph
# program found at http://www.akhphd.au.dk/~bertho/cvsgraph/
# File locations can be set at in the editparams.cgi page.
sub print_top {
    # Emit the HTTP content-type header and the HTML document preamble.
    # The page title defaults to "Error" when omitted or false.
    my ($title) = @_;
    $title ||= "Error";
    print "Content-type: text/html\n\n",
          "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n",
          "<html>\n<head>\n",
          " <title>$title</title>\n",
          "</head>\n<body>\n";
}
sub print_bottom {
    # Close the HTML document opened by print_top.
    print "</body>\n", "</html>\n";
}
sub print_usage {
# Error page shown when the required "file" CGI argument is missing.
# NOTE: '&print_top;' (ampersand, no parens) implicitly passes this sub's
# own @_ through to print_top; since print_usage is called without
# arguments, print_top sees no title and falls back to "Error".
&print_top;
print "This script requires a filename.\n";
&print_bottom;
}
### Live code below
# Bail out early unless the administrator configured the path to the
# external cvsgraph executable (editparams.cgi, 'cvsgraph' param).
unless (Param('cvsgraph')) {
&print_top;
print "CVS Graph is not currently configured for this installation\n";
&print_bottom;
exit;
}
my @src_roots = getRepositoryList();
# Handle the "file" argument
#
my $filename = '';
$filename = $::FORM{file} if defined $::FORM{file};
# No file requested: show the usage page and stop.
if ($filename eq '') {
&print_usage;
exit;
}
# Split the request into directory part (with trailing '/') and basename.
my ($file_head, $file_tail) = $filename =~ m@(.*/)?(.+)@;
my $url_filename = url_quote($filename);
# Handle the "root" argument
#
my $root = $::FORM{root};
if (defined $root and $root ne '') {
$root =~ s|/$||;
# NOTE(review): presumably validateRepository aborts on an unknown root,
# so $root printed below has already been vetted -- confirm it dies
# rather than returns on failure.
validateRepository($root);
if (-d $root) {
# A user-supplied root takes precedence over the configured list.
unshift(@src_roots, $root);
} else {
&print_top;
print "Error: Root, $root, is not a directory.<BR><BR>\n";
&print_bottom;
exit;
}
}
# Find the rcs file
#
my $rcs_filename;
my $found_rcs_file = 0;
# Try each repository root in turn: a live file is at <root>/<path>,v while
# a file deleted on the trunk lives in the directory's Attic subdirectory.
foreach (@src_roots) {
$root = $_;
$rcs_filename = "$root/$filename,v";
$rcs_filename = Fix_BonsaiLink($rcs_filename);
$found_rcs_file = 1, last if -r $rcs_filename;
$rcs_filename = "$root/${file_head}Attic/$file_tail,v";
$found_rcs_file = 1, last if -r $rcs_filename;
}
unless ($found_rcs_file) {
# Log the shell-escaped name server-side; show only the HTML-escaped
# name to the user.
my $escaped_filename = html_quote($filename);
my $shell_filename = shell_escape($filename);
&print_top;
print STDERR "cvsgraph.cgi: Rcs file, $shell_filename, does not exist.\n";
print "Invalid filename: $escaped_filename.\n";
&print_bottom;
exit;
}
&ChrootFilename($root, $rcs_filename);
# Hack these variables up the way that the cvsgraph executable wants them
my $full_rcs_filename = $rcs_filename;
# cvsgraph takes the repository root via -r and the file path relative to it.
$rcs_filename =~ s:^$root/::;
# The cvsgraph config file lives at data/cvsgraph.conf next to this script.
my $conf = $0;
$conf =~ s:[^/\\]+$::;
$conf .= "data/cvsgraph.conf";
my @cvsgraph_cmd = (Param("cvsgraph"),
"-c", $conf,
"-r", $root);
# Two modes: with image=1 this CGI serves the PNG itself; otherwise it
# emits the HTML page that embeds the image plus its client-side map.
if (defined $::FORM{'image'}) {
print "Content-type: image/png\n\n";
}
else {
push(@cvsgraph_cmd, "-i", "-M", "revmap"); # Include args to make map
&print_top("CVS Graph for " . $file_tail);
# DOM-capable browsers get popup log messages positioned next to the
# clicked revision; other browsers get the no-op stubs further below.
print <<"--endquote--" if $::use_dom;
<script $::script_type><!--
var r
function showMessage(rev) {
if (r) {
r.style.display='none'
}
r = document.getElementById('rev_'+rev)
if (!r)
return false
var l = document.getElementById('link_'+rev)
var t = l.offsetTop + 20
r.style.top = t
r.style.left = l.offsetLeft + l.offsetWidth + 20
r.style.display=''
return true
}
function hideMessage() {
if (r) {
r.style.display='none'
return true
}
return false
}
//--></script>
<style type="text/css">
.log_msg {
border-style: solid;
border-color: #F0A000;
background-color: #FFFFFF;
color: #000000;
padding: 5;
position: fixed;
}
</style>
--endquote--
print <<"--endquote--" unless $::use_dom;
<script $::script_type><!--
// Dummy Functions to prevent script errors (this browser does not support DOM)
function showMessage() { return false }
function hideMessage() { return false }
//--></script>
--endquote--
}
# Run cvsgraph: in image mode its PNG output goes straight to the client;
# in HTML mode it emits the <map name="revmap"> area list requested above.
system(@cvsgraph_cmd, $rcs_filename);
if (!defined $::FORM{'image'}) {
    # Embed the image (served by a second request to this same CGI) and
    # wire it to the popup handlers defined in the page head.
    print qq{<img src="cvsgraph.cgi?file=$url_filename&image=1" };
    print qq{usemap="#revmap" alt="$filename" border="0" onclick="hideMessage()">\n};
    if ($::use_dom) {
        # Emit one hidden <div> per revision holding its log message;
        # showMessage() reveals the div for the clicked revision.
        require 'cvsblame.pl';
        &parse_cvs_file($full_rcs_filename);
        foreach my $rev (keys %::revision_log) {
            my $author = EmailFromUsername($::revision_author{$rev});
            my $rev_log = html_quote($::revision_log{$rev});
            $rev_log =~ s/\n/<br>\n/g;
            print qq{<div id="rev_$rev" class="log_msg" style="display:none">\n};
            print qq{<b>$rev</b> };
            # BUG FIX: the closing angle-bracket entity was written as
            # "&gt " (missing semicolon); emit the well-formed "&gt;".
            print qq{&lt;<a href="mailto:$author">$author</a>&gt; };
            print qq{<b>$::revision_ctime{$rev}</b><br>\n};
            print $rev_log;
            print qq{</div>\n};
        }
    }
    &print_bottom;
}

View File

@@ -1,181 +0,0 @@
# CvsGraph configuration
#
# - Empty lines and whitespace are ignored.
#
# - Comments start with '#' and everything until
# end of line is ignored.
#
# - Strings are C-style strings in which characters
# may be escaped with '\' and written in octal
# and hex escapes. Note that '\' must be escaped
# if it is to be entered as a character.
#
# - Some strings are expanded with printf like
# conversions which start with '%'. Not all
# are applicable at all times, in which case they
# will expand to nothing.
# %c = cvsroot (with trailing '/')
# %C = cvsroot (*without* trailing '/')
# %m = module (with trailing '/')
# %M = module (*without* trailing '/')
# %f = filename without path
# %F = filename without path and with ",v" stripped
# %p = path part of filename (with trailing '/')
# %r = number of revisions
# %b = number of branches
# %% = '%'
# %R = the revision number (e.g. '1.2.4.4')
# %P = previous revision number
# %B = the branch number (e.g. '1.2.4')
# %d = date of revision
# %a = author of revision
# %s = state of revision
# %t = current tag of branch or revision
# %0..%9 = command-line argument -0 .. -9
#
# - Numbers may be entered as octal, decimal or
# hex as in 0117, 79 and 0x4f respectively.
#
# - Fonts are numbered 0..4 (defined as in libgd)
# 0 = tiny
# 1 = small
# 2 = medium (bold)
# 3 = large
# 4 = giant
#
# - Colors are a string like html-type colors in
# the form "#rrggbb" with parts written in hex
# rr = red (00..ff)
# gg = green (00-ff)
# bb = blue (00-ff)
#
# - There are several reserved words besides the
# feature-keywords. These additional reserved words
# expand to numerical values:
# * false = 0
# * true = 1
# * left = 0
# * center = 1
# * right = 2
# * gif = 0
# * png = 1
# * jpeg = 2
# * tiny = 0
# * small = 1
# * medium = 2
# * large = 3
# * giant = 4
# cvsroot <string>
# The *absolute* base directory where the
# CVS/RCS repository can be found
# cvsmodule <string>
#
#cvsroot = "/cvsroot";
#cvsmodule = "";
# color_bg <color>
# The background color of the image
color_bg = "#ffffff";
# date_format <string>
# The strftime(3) format string for date and time
date_format = "%Y-%m-%d %H:%M:%S";
box_shadow = true;
tag_font = medium;
tag_color = "#007000";
rev_font = giant;
rev_color = "#000000";
rev_bgcolor = "#f0f0f0";
rev_separator = 1;
rev_minline = 15;
rev_maxline = 30;
rev_lspace = 5;
rev_rspace = 5;
rev_tspace = 3;
rev_bspace = 3;
rev_text = "%d\n%a"; # or "%d\n%a, %s" for author and state too
rev_text_font = tiny;
rev_text_color = "#500020";
# branch_font <number>
# The font of the number and tags
# branch_color <color>
# All branch element's color
# branch_[lrtb]space <number>
# Interior spacing (margin)
# branch_margin <number>
# Exterior spacing
# branch_connect <number>
# Length of the vertical connector
branch_font = medium;
branch_color = "#0000c0";
branch_bgcolor = "#ffffc0";
branch_lspace = 5;
branch_rspace = 5;
branch_tspace = 3;
branch_bspace = 3;
branch_margin = 15;
branch_connect = 8;
# title <string>
# The title string is expanded (see above for details)
# title_[xy] <number>
# Postion of title
# title_font <number>
# The font
# title_align <number>
# 0 = left
# 1 = center
# 2 = right
# title_color <color>
title = "%m%f\nRevisions: %r, Branches: %b";
title_x = 10;
title_y = 5;
title_font = small;
title_align = left;
title_color = "#800000";
# Margins of the image
# Note: the title is outside the margin
margin_top = 35;
margin_bottom = 10;
margin_left = 10;
margin_right = 10;
# Image format(s)
# image_type <number|{gif,jpeg,png}>
# gif (0) = Create gif image
# png (1) = Create png image
# jpeg (2) = Create jpeg image
# Image types are available if they can be found in
# the gd library. Newer versions of gd do not have
# gif anymore. CvsGraph will automatically generate
# png images instead.
# image_quality <number>
# The quality of a jpeg image (1..100)
image_type = png;
image_quality = 75;
# HTML ImageMap generation
# map_name <string>
# The name= attribute in <map name="mapname">...</map>
# map_branch_href <string>
# map_branch_alt <string>
# map_rev_href <string>
# map_rev_alt <string>
# map_diff_href <string>
# map_diff_alt <string>
# These are the href= and alt= attributes in the <area>
# tags of html. The strings are expanded (see above).
map_name = "revmap";
map_branch_href = "href=\"cvslog.cgi?file=%p%F&rev=%t\"";
map_branch_alt = "alt=\"%t\" %2";
map_rev_href = "href=\"cvsblame.cgi?file=%p%F&rev=%R\"";
map_rev_alt = "alt=\"%a\" onmouseover=\"showMessage('%R')\" id=\"link_%R\" %3";
map_diff_href = "href=\"%9cvsview2.cgi?diff_mode=context&whitespace_mode=show&file=%F&subdir=%p&command=DIFF_FRAMESET&rev1=%P&rev2=%R\"";
map_diff_alt = "alt=\"%P &lt;-&gt; %R\" %4";

View File

@@ -1,89 +0,0 @@
#!/usr/bin/perl -w
# -*- Mode: perl; indent-tabs-mode: nil -*-
#
# The contents of this file are subject to the Netscape Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/NPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is the Bonsai CVS tool.
#
# The Initial Developer of the Original Code is Netscape Communications
# Corporation. Portions created by Netscape are
# Copyright (C) 1998 Netscape Communications Corporation. All
# Rights Reserved.
#
# Contributor(s):
use strict;
require 'CGI.pl';
my $file= $::FORM{'file'};
my $mark= &SanitizeMark($::FORM{'mark'});
# Anchor line for the redirect: ten lines above the marked line so the mark
# is visible in context, clamped to 1 near the top (or for non-numeric marks).
my $ln = (($mark =~ m/^\d+$/ && $mark > 10) ? $mark-10 : 1 );
my $rev = &SanitizeRevision($::FORM{'rev'});
my $debug = $::FORM{'debug'};
print "Content-Type: text/html\n\n";
# Repository root: explicit "root" argument or the configured default.
my $CVS_ROOT = $::FORM{'root'};
if( !defined($CVS_ROOT) || $CVS_ROOT eq '' ){
$CVS_ROOT = pickDefaultRepository();
}
validateRepository($CVS_ROOT);
# Root path with '/' flattened to '_' -- computed but not used in the code
# below; presumably kept for parity with other bonsai CGIs (TODO confirm).
my $CVS_REPOS_SUFIX = $CVS_ROOT;
$CVS_REPOS_SUFIX =~ s/\//_/g;
&ConnectToDatabase();
# Find every directory in this repository containing a checkin of a file
# with this basename.  '?' placeholders keep the query injection-safe.
my @bind_values = ( $CVS_ROOT, $file );
my $qstring = "SELECT DISTINCT dirs.dir FROM checkins,dirs,files," .
"repositories WHERE dirs.id=dirid AND files.id=fileid AND " .
"repositories.id=repositoryid AND repositories.repository=? AND " .
"files.file=? ORDER BY dirs.dir";
# Debug mode: echo the query and its bind values (HTML-escaped) to the page.
if ($debug) {
print "<pre wrap>\n";
print &html_quote($qstring) . "\n";
print "With values:\n";
foreach my $v (@bind_values) {
print "\t" . &html_quote($v) . "\n";
}
print "</pre>\n";
}
my (@row, $d, @fl, $s);
&SendSQL($qstring, @bind_values);
# Collect full "dir/file" paths for every matching directory.
while(@row = &FetchSQLData()){
$d = $row[0];
push @fl, "$d/$file";
}
&DisconnectFromDatabase();
# Dispatch on how many repository paths matched the requested basename:
# none -> apology page; one -> redirect to cvsblame; several -> pick list.
if( @fl == 0 ){
print "<h3>No files matched this file name: " . &html_quote($file) .
". It may have been added recently.</h3>";
}
elsif( @fl == 1 ){
# Single match: meta-refresh straight to cvsblame at the marked line.
$s = &url_quote($fl[0]);
print "<head>
<meta http-equiv=Refresh
content=\"0; URL=cvsblame.cgi?file=$s&rev=$rev&root=$CVS_ROOT&mark=$mark#$ln\">
</head>
";
}
else {
print "<h3>Pick the file that best matches the one you are looking for:</h3>\n";
for $s (@fl) {
# BUG FIX: propagate root=$CVS_ROOT in these links, matching the
# single-match redirect above; previously a non-default repository
# selection was silently dropped when several files matched.
print "<dt><a href=cvsblame.cgi?file=" . &url_quote($s) .
"&rev=$rev&root=$CVS_ROOT&mark=$mark#$ln>" . &html_quote($s) . "</a>";
}
}

Some files were not shown because too many files have changed in this diff Show More