abiao 5 лет назад
Родитель
Commit
c6cb47a90a
100 измененных файлов с 17824 добавлено и 0 удалено
  1. BIN
      go/gopath/src/DocTo/test/InputFiles/Ballymaloe+Hot+Buttered+Lobster (2).doc
  2. BIN
      go/gopath/src/DocTo/test/InputFiles/Ballymaloe+Hot+Buttered+Lobster 3 .doc
  3. BIN
      go/gopath/src/DocTo/test/InputFiles/Book1 with password.xls
  4. BIN
      go/gopath/src/DocTo/test/InputFiles/CullohillApplePie - Copy.doc
  5. BIN
      go/gopath/src/DocTo/test/InputFiles/CullohillApplePie.doc
  6. BIN
      go/gopath/src/DocTo/test/InputFiles/CullohillApplePie_Protected.doc
  7. BIN
      go/gopath/src/DocTo/test/InputFiles/Document Forces Save - GEOCADx (1).docx
  8. BIN
      go/gopath/src/DocTo/test/InputFiles/GEOCADx (1).docx
  9. BIN
      go/gopath/src/DocTo/test/InputFiles/Pie3.doc
  10. BIN
      go/gopath/src/DocTo/test/InputFiles/PigeonPie.doc
  11. BIN
      go/gopath/src/DocTo/test/InputFiles/UnicodeTest.doc
  12. 86 0
      go/gopath/src/DocTo/test/InputFiles/Week 1 Test.csv
  13. BIN
      go/gopath/src/DocTo/test/InputFiles/Week 1 Test.xls
  14. 29 0
      go/gopath/src/DocTo/test/InputFiles/pie3Text.txt
  15. 23 0
      go/gopath/src/DocTo/test/doctoFormatList.txt
  16. 72 0
      go/gopath/src/DocTo/test/testDocTo.bat
  17. 5 0
      go/gopath/src/DocTo/test/testhook.bat
  18. 5 0
      go/gopath/src/DocTo/test/testhttps.bat
  19. 4 0
      go/gopath/src/DocTo/test/testlogging.bat
  20. 6 0
      go/gopath/src/DocTo/test/testremove.bat
  21. 53 0
      go/gopath/src/DocTo/test/testrun longparam.bat
  22. 2 0
      go/gopath/src/DocTo/test/testrun_DirOnly.bat
  23. 2 0
      go/gopath/src/DocTo/test/testrun_Rachel.bat
  24. 8 0
      go/gopath/src/DocTo/test/testrunbreak.bat
  25. 6 0
      go/gopath/src/DocTo/test/testrundironly.bat
  26. 2 0
      go/gopath/src/DocTo/test/testspecific.bat
  27. 5 0
      go/gopath/src/DocTo/test/testxlsslashes.bat
  28. 9 0
      go/gopath/src/github.com/adjust/gorails/.travis.yml
  29. 20 0
      go/gopath/src/github.com/adjust/gorails/LICENSE
  30. 9 0
      go/gopath/src/github.com/adjust/gorails/README.md
  31. 58 0
      go/gopath/src/github.com/adjust/gorails/marshal/README.md
  32. 448 0
      go/gopath/src/github.com/adjust/gorails/marshal/marshal.go
  33. 419 0
      go/gopath/src/github.com/adjust/gorails/marshal/marshal_test.go
  34. 35 0
      go/gopath/src/github.com/adjust/gorails/session/README.md
  35. 73 0
      go/gopath/src/github.com/adjust/gorails/session/session.go
  36. 30 0
      go/gopath/src/github.com/alidayu/README.md
  37. 34 0
      go/gopath/src/github.com/alidayu/body.go
  38. 23 0
      go/gopath/src/github.com/alidayu/call_double.go
  39. 23 0
      go/gopath/src/github.com/alidayu/call_tts.go
  40. 22 0
      go/gopath/src/github.com/alidayu/call_voice.go
  41. 12 0
      go/gopath/src/github.com/alidayu/const.go
  42. 34 0
      go/gopath/src/github.com/alidayu/post.go
  43. 27 0
      go/gopath/src/github.com/alidayu/sms_send.go
  44. 164 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/README.md
  45. 92 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
  46. 618 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
  47. 1687 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket_test.go
  48. 739 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
  49. 1377 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/client_test.go
  50. 67 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
  51. 420 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
  52. 124 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/conn_test.go
  53. 82 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
  54. 44 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
  55. 398 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/crc_test.go
  56. 399 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
  57. 352 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/download_test.go
  58. 82 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
  59. 245 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
  60. 60 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
  61. 414 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
  62. 468 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy_test.go
  63. 280 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
  64. 946 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart_test.go
  65. 346 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
  66. 251 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/option_test.go
  67. 442 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
  68. 127 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/type_test.go
  69. 438 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
  70. 447 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/upload_test.go
  71. 165 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
  72. 105 0
      go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/utils_test.go
  73. 5 0
      go/gopath/src/github.com/astaxie/beego/.gitignore
  74. 51 0
      go/gopath/src/github.com/astaxie/beego/.travis.yml
  75. 52 0
      go/gopath/src/github.com/astaxie/beego/CONTRIBUTING.md
  76. 13 0
      go/gopath/src/github.com/astaxie/beego/LICENSE
  77. 61 0
      go/gopath/src/github.com/astaxie/beego/README.md
  78. 424 0
      go/gopath/src/github.com/astaxie/beego/admin.go
  79. 355 0
      go/gopath/src/github.com/astaxie/beego/adminui.go
  80. 362 0
      go/gopath/src/github.com/astaxie/beego/app.go
  81. 91 0
      go/gopath/src/github.com/astaxie/beego/beego.go
  82. 59 0
      go/gopath/src/github.com/astaxie/beego/cache/README.md
  83. 103 0
      go/gopath/src/github.com/astaxie/beego/cache/cache.go
  84. 168 0
      go/gopath/src/github.com/astaxie/beego/cache/cache_test.go
  85. 100 0
      go/gopath/src/github.com/astaxie/beego/cache/conv.go
  86. 143 0
      go/gopath/src/github.com/astaxie/beego/cache/conv_test.go
  87. 274 0
      go/gopath/src/github.com/astaxie/beego/cache/file.go
  88. 190 0
      go/gopath/src/github.com/astaxie/beego/cache/memcache/memcache.go
  89. 108 0
      go/gopath/src/github.com/astaxie/beego/cache/memcache/memcache_test.go
  90. 244 0
      go/gopath/src/github.com/astaxie/beego/cache/memory.go
  91. 240 0
      go/gopath/src/github.com/astaxie/beego/cache/redis/redis.go
  92. 107 0
      go/gopath/src/github.com/astaxie/beego/cache/redis/redis_test.go
  93. 240 0
      go/gopath/src/github.com/astaxie/beego/cache/ssdb/ssdb.go
  94. 103 0
      go/gopath/src/github.com/astaxie/beego/cache/ssdb/ssdb_test.go
  95. 442 0
      go/gopath/src/github.com/astaxie/beego/config.go
  96. 144 0
      go/gopath/src/github.com/astaxie/beego/config/config.go
  97. 134 0
      go/gopath/src/github.com/astaxie/beego/config/fake.go
  98. 464 0
      go/gopath/src/github.com/astaxie/beego/config/ini.go
  99. 184 0
      go/gopath/src/github.com/astaxie/beego/config/ini_test.go
  100. 0 0
      go/gopath/src/github.com/astaxie/beego/config/json.go

BIN
go/gopath/src/DocTo/test/InputFiles/Ballymaloe+Hot+Buttered+Lobster (2).doc


BIN
go/gopath/src/DocTo/test/InputFiles/Ballymaloe+Hot+Buttered+Lobster 3 .doc


BIN
go/gopath/src/DocTo/test/InputFiles/Book1 with password.xls


BIN
go/gopath/src/DocTo/test/InputFiles/CullohillApplePie - Copy.doc


BIN
go/gopath/src/DocTo/test/InputFiles/CullohillApplePie.doc


BIN
go/gopath/src/DocTo/test/InputFiles/CullohillApplePie_Protected.doc


BIN
go/gopath/src/DocTo/test/InputFiles/Document Forces Save - GEOCADx (1).docx


BIN
go/gopath/src/DocTo/test/InputFiles/GEOCADx (1).docx


BIN
go/gopath/src/DocTo/test/InputFiles/Pie3.doc


BIN
go/gopath/src/DocTo/test/InputFiles/PigeonPie.doc


BIN
go/gopath/src/DocTo/test/InputFiles/UnicodeTest.doc


+ 86 - 0
go/gopath/src/DocTo/test/InputFiles/Week 1 Test.csv

@@ -0,0 +1,86 @@
+,,Kitchen 1,,,,,,,,,Kitchen 2,,,,,,,,,,Demo ,,,,Kitchen 3,,,,,,,,
+Recipe,,Red,Green,Yellow,White,Orange,Brown,Pink,Blue,Black,Orange,Black,Yellow,Grey,Pink,Red,Blue,Green,Brown,White,Demo 1,Demo 2,Demo 3,Demo 4,Red,Purple,Black,Yellow,Orange,Green,White,Pink,Brown
+Beginners Brown Bread,,1,,1,,1,,1,,1,,1,,1,,1,,1,,1,1,,1,,1,,1,,1,,1,,1
+Onion & Thyme Leaf Soup,,1,,,,1,,,,1, ,, ,1, ,, ,1, ,,1,,,,,,,1,1,,,,1
+Potato & Fresh Herb soup,,,1,,,,1,,,,1,,,,1,,,,1,,,1,,,1,,,,,1,,,
+Potato Soup with Wild Garlic Pesto,,,,1,,,,1,,,,1,, ,,1,, ,,1,,,1,,,1,,,,,1,,
+Potato Soup with Gubeen Chorizo & Flat Parsley,,,,,1,,,,1,,,,1,,,,1,,,,,,,1,,,1,,,,,1,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+Gruyere & Dill Tart   (pastry 6:3),,1,1,1,,,,,,,1,1,1,,,,,,,,1,,,,1,1,1,,,,,,
+French Onion Tart   (pastry 6:3),,,,,1,1,1,,,,,,,1,1,1,,,,,,1,,,,,,1,1,1,,,
+Mushroom & Thyme Leaf Tart   (pastry 6:3),,,,,,,,1,1,,,,,,,,1,1,1,,,,1,,,,,,,,1,1,
+Asparagus & Spring Onion Tart (pastry 6:3),,,,,,,,,,1,,,,,,,,,,1,,,,,,,,,,,,,1
+Pennys Cabbage Salad,,,,,1,,,,1,,1,,,,,,1,,,,,,,,,1,,,,,,1,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+Bananas in Lime Syrup,,1,,1,,1,,1,,1,1,,1,,1,,1,,1,,1,,1,,,1,,1,1,1,,,
+Rhubarb & Strawberry Compote,,,1,,1,,1,,1,,,1,,1,,1,,1,,1,,1,,1,1,,1,1,,,1,1,
+Fork Biscuits,,,1,,,,1,,,1,,,1,,1,,,,1,,,1,,,,,1,,,1,,,1
+Candied Peel,,Lemonade Duty Students to start candied peel,,,,,,,,,,,,,,,,,,,,,,,Lemonade Duty Students to start candied peel,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+,,,,,,,,,,Some More Data,,,,,,,,,,,,,,,,,,,,,,,

BIN
go/gopath/src/DocTo/test/InputFiles/Week 1 Test.xls


Разница между файлами не показана из-за своего большого размера
+ 29 - 0
go/gopath/src/DocTo/test/InputFiles/pie3Text.txt


+ 23 - 0
go/gopath/src/DocTo/test/doctoFormatList.txt

@@ -0,0 +1,23 @@
+wdFormatDOSTextLineBreaks txt
+wdFormatEncodedText txt
+wdFormatFilteredHTML html
+wdFormatOpenDocumentText odt
+wdFormatHTML html
+wdFormatRTF rtf
+wdFormatStrictOpenXMLDocument oxd
+wdFormatTemplate dot
+wdFormatText txt
+wdFormatTextLineBreaks txt
+wdFormatUnicodeText txt
+wdFormatWebArchive .webarchive
+wdFormatXML xml  
+wdFormatDocument doc
+wdFormatDocumentDefault doc
+wdFormatPDF pdf
+wdFormatTemplate dot
+wdFormatXMLDocument xml 
+wdFormatXMLDocumentMacroEnabled xml
+wdFormatXMLTemplate xml 
+wdFormatXMLTemplateMacroEnabled xml
+wdFormatXPS xps
+ 

+ 72 - 0
go/gopath/src/DocTo/test/testDocTo.bat

@@ -0,0 +1,72 @@
+REM Explaination 
+REM Test runs to check docto scenarios
+REM execute docto inserting variables 
+REM %~d0 and %~p0 together give the full directory this batch file is executing in.
+
+
+REM Remove all generated files from output directory that may exist.
+ del GeneratedFiles\*.* /q
+
+
+REM Output Help Text
+"../exe/docto.exe" -h
+
+
+REM ---------------------------------
+REM Main Test : 
+REM Loop through each format provided in doctoFormatList and try to convert our
+REM test file to each format.
+REM ---------------------------------
+
+FOR /F "eol=; tokens=1,2* delims=, " %%i in (doctoFormatList.txt) do "../exe/docto.exe"  -f "%~d0%~p0\Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\pie3out_%%i.%%j"  -T  %%i
+
+
+
+REM ---------------------------------
+REM Convert All files in Input Directory to PDF 
+REM ---------------------------------
+
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\"  -o "%~d0%~p0GeneratedFiles"    -T  wdFormatPDF -OX pdf
+
+
+REM Try on Single
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3Single.pdf"    -T  wdFormatPDF
+
+REM Try on Single no output file with Verbose Logging
+"../exe/docto.exe" -L 10 -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\SingleDir\"    -T  wdFormatPDF  
+
+REM Try on Single no output file with Verbose Logging
+"../exe/docto.exe" -L 10 -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\SingleDirNoSlash"    -T  wdFormatXMLDocument 
+
+
+
+REM Should produce an error incorrect format.
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatTestPDF
+
+REM Should produce an error - input file does not exist
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3_doesntexist.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF
+
+REM Test http Webhook
+REM ---------------------------------
+REM To view visit https://toflidium.com/webhooks/docto/docto_test_values.txt
+REM ---------------------------------
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -W http://toflidium.com/webhooks/docto/webhook_test.php
+REM Check https webhook.
+
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -W https://toflidium.com/webhooks/docto/webhook_test.php
+
+
+REM If output Dir left out default to input
+copy  "%~d0%~p0Inputfiles\pie3.doc"  "%~d0%~p0GeneratedFiles\PieNoOutputTest.doc"
+"../exe/docto.exe"  -f "%~d0%~p0GeneratedFiles\PieNoOutputTest.doc"    -T  wdFormatPDF
+
+REM Check that works with -o before -f
+"../exe/docto.exe"    -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -f "%~d0%~p0Inputfiles\pie3.doc" 
+
+REM Check Unicode to txt conversion. issue #32
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\UnicodeTest.doc"  -o "%~d0%~p0GeneratedFiles\UnicodeTest.txt"    -T  wdFormatEncodedText -E 65001 
+
+REM Check Logging
+"../exe/docto.exe"    -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -f "%~d0%~p0Inputfiles\pie3.doc"  -G -L 10
+"../exe/docto.exe"    -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -f "%~d0%~p0Inputfiles\pie3.doc"  -GL outputlog.log -L 10
+

+ 5 - 0
go/gopath/src/DocTo/test/testhook.bat

@@ -0,0 +1,5 @@
+REM Test Webhook
+REM ---------------------------------
+REM To view visit http://toflidium.com/webhooks/docto/docto_test_values.txt
+REM ---------------------------------
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf" -L 10   -T  wdFormatPDF -W http://toflidium.com/webhooks/docto/webhook_test.php

+ 5 - 0
go/gopath/src/DocTo/test/testhttps.bat

@@ -0,0 +1,5 @@
+REM Test Webhook
+REM ---------------------------------
+REM To view visit https://toflidium.com/webhooks/docto/docto_test_values.txt
+REM ---------------------------------
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -W https://webhook.azure.cookingisfun.ie/doctotest.php -L 10

+ 4 - 0
go/gopath/src/DocTo/test/testlogging.bat

@@ -0,0 +1,4 @@
+REM Check Logging
+"../exe/docto.exe" -v
+"../exe/docto.exe"    -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -f "%~d0%~p0Inputfiles\pie3.doc"  -G -L 10
+"../exe/docto.exe"    -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -f "%~d0%~p0Inputfiles\pie3.doc"  -GL outputlog.log -L 10

+ 6 - 0
go/gopath/src/DocTo/test/testremove.bat

@@ -0,0 +1,6 @@
+REM Check Logging
+"../exe/docto.exe" -v
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles2\pie3.doc"   -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -R -G -L 10  
+cd "%~d0%~p0Inputfiles2\"
+dir
+Pause

+ 53 - 0
go/gopath/src/DocTo/test/testrun longparam.bat

@@ -0,0 +1,53 @@
+REM Explaination 
+REM For use of FOR type FOR /? at prompt
+REM The first part specifies ; lines to be ignored
+REM I want the first 2 items (delimited by , or [space] on each line from the file specified.
+REM Variables will be %%i (%%j is created automatically)
+REM execute docto inserting variables 
+REM %~d0 and %~p0 together give the full directory this batch file is executing in.
+
+
+REM Remove all generated files that may exist.
+del GeneratedFiles\*.* /q
+pause
+
+"../exe/docto.exe" -h
+
+REM Individually try each format on Test Document
+REM FOR /F "eol=; tokens=1,2* delims=, " %%i in (testdata.txt) do "../exe/docto.exe"  --inputfile "%~d0%~p0\Inputfiles\pie3.doc"  --outputfile "%~d0%~p0GeneratedFiles\pie3out_%%i.%%j"  -T  %%i
+
+REM Try on Directory
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\"  -o "%~d0%~p0GeneratedFiles"    --format  wdFormatPDF --outputextension pdf
+
+
+REM Try on Single
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3Single.pdf"    --FORMAT  wdFormatPDF
+
+REM Try on Single no output file with Verbose Logging
+"../exe/docto.exe" --LOGLEVEL 10 -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\SingleDir\"    --FORMAT  wdFormatPDF  
+
+REM Try on Single no output file with Verbose Logging
+"../exe/docto.exe" -L 10 -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\SingleDirNoSlash"    -T  wdFormatXMLDocument 
+
+
+
+REM Should produce an error
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatTestPDF
+
+REM Should produce an error
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3_doesntexist.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF
+
+REM Test Webhook
+REM *********************************
+REM To view visit http://toflidium.com/webhooks/docto/docto_test_values.txt
+REM *********************************
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3.pdf"    -T  wdFormatPDF -W http://home.kinchik.ie/webhooks/docto/all.php
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3b.pdf"    -T  wdFormatPDF -W http://home.kinchik.ie/webhooks/docto/all.php?
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\Pie3c.pdf"    -T  wdFormatPDF -W http://home.kinchik.ie/webhooks/docto/all.php?Aparam=avalueIhave
+
+
+
+REM XLSTO
+
+REM Try on Directory
+"../exe/xlsto.exe"  -f "%~d0%~p0Inputfiles\"  -o "%~d0%~p0GeneratedFiles"    -T  xlPDF -OX .pdf

+ 2 - 0
go/gopath/src/DocTo/test/testrun_DirOnly.bat

@@ -0,0 +1,2 @@
+REM Try on Directory
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\"  -o "%~d0%~p0GeneratedFiles"     -T  wdFormatPDF -OX .pdf  -W http://toflidium.com/webhooks/docto/webhook_test.php -M true

+ 2 - 0
go/gopath/src/DocTo/test/testrun_Rachel.bat

@@ -0,0 +1,2 @@
+REM Try on Directory
+"../exe/docto.exe"  -f "%~d0%~p0Rachel Recipes\"  -o "%~d0%~p0GeneratedFiles\RR\"    -T  wdFormatDocumentDefault -C 65535 -OX .docx -X false -W http://toflidium.com/webhooks/docto/webhook_full.php

+ 8 - 0
go/gopath/src/DocTo/test/testrunbreak.bat

@@ -0,0 +1,8 @@
+REM Try on Single no output file
+cls
+"../exe/docto.exe" -V
+"../exe/docto.exe"  -L 0 -G -f "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\SingleDocFolderCreated5"    -T  wdFormatPDddd
+
+"../exe/docto.exe"  -L 10 -G -f  "%~d0%~p0Inputfiles\pie3.doc"  -o "%~d0%~p0GeneratedFiles\SingleDocFolderCreated6"  --wpp  -T  wdFormatRTF
+
+Pause

+ 6 - 0
go/gopath/src/DocTo/test/testrundironly.bat

@@ -0,0 +1,6 @@
+REM Try on Directory
+"../exe/docto.exe"  -f "%~d0%~p0Inputfiles\"  -o "%~d0%~p0GeneratedFiles"    -T  wdFormatPDF -OX pdf -W http://toflidium.com/webhooks/docto/webhook_test.php -Q
+
+REM Try on Relative Directory
+"../exe/docto.exe"  -f "..\test\Inputfiles\"  -o "%~d0%~p0GeneratedFiles"    -T  wdFormatPDF -OX pdf 
+

+ 2 - 0
go/gopath/src/DocTo/test/testspecific.bat

@@ -0,0 +1,2 @@
+REM Check that works with -o before -f
+"../exe/docto.exe"    -o "%~d0%~p0GeneratedFiles3"    -T  wdFormatPDF -f "%~d0%~p0Inputfiles2\" -L 10

+ 5 - 0
go/gopath/src/DocTo/test/testxlsslashes.bat

@@ -0,0 +1,5 @@
+REM XLSTO
+
+
+REM Try on Single no output file with Verbose Logging
+ "../exe/xlsto.exe" -L 10 -f "%~d0%~p0Inputfiles"  -o "%~d0%~p0GeneratedFiles\testcreatedir\"    -T  xlcsv 

+ 9 - 0
go/gopath/src/github.com/adjust/gorails/.travis.yml

@@ -0,0 +1,9 @@
+language: go
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - tip
+
+install:
+ - go get code.google.com/p/go.crypto/pbkdf2

+ 20 - 0
go/gopath/src/github.com/adjust/gorails/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Andrew Slotin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 9 - 0
go/gopath/src/github.com/adjust/gorails/README.md

@@ -0,0 +1,9 @@
+gorails
+=======
+
+[![Build Status](https://travis-ci.org/adjust/gorails.png)](https://travis-ci.org/adjust/gorails)
+
+A set of go packages to integrate your Go app into existing Rails project.
+
+* [gorails/session](https://github.com/adjust/gorails/tree/master/session) - decrypts session cookie set by Rails 4 app
+* [gorails/marshal](https://github.com/adjust/gorails/tree/master/marshal) - unmarshalling objects serialized with Ruby Marshal

+ 58 - 0
go/gopath/src/github.com/adjust/gorails/marshal/README.md

@@ -0,0 +1,58 @@
+gorails/marshal
+===============
+
+[![Build Status](https://travis-ci.org/adjust/gorails.png)](https://travis-ci.org/adjust/gorails)
+
+## Installation
+
+With Go and git installed:
+
+```
+go get -u github.com/adjust/gorails/marshal
+```
+
+Or you can use [Goem](http://big-elephants.com/2013-09/goem-the-missing-go-extension-manager/).
+
+## Usage
+
+```go
+import (
+  "errors"
+  "github.com/adjust/gorails/marshal"
+)
+
+func getAuthUserId(decrypted_session_data []byte) (user_id int64, err error) {
+  unauthorized_user := errors.New("Unauthorized user")
+  invalid_auth_data := errors.New("Invalid auth data")
+
+  session_data, err := marshal.CreateMarshalledObject(decrypted_session_data).GetAsMap()
+  if err != nil {
+    return
+  }
+
+  warden_data, ok := session_data["warden.user.user.key"]
+  if !ok {
+    return 0, unauthorized_user
+  }
+
+  warden_user_key, err := warden_data.GetAsArray()
+  if err != nil {
+    return
+  }
+  if len(warden_user_key) < 1 {
+    return 0, invalid_auth_data
+  }
+
+  user_data, err := warden_user_key[0].GetAsArray()
+  if err != nil {
+    return
+  }
+  if len(user_data) < 1 {
+    return 0, invalid_auth_data
+  }
+
+  user_id, err = user_data[0].GetAsInteger()
+
+  return
+}
+```

+ 448 - 0
go/gopath/src/github.com/adjust/gorails/marshal/marshal.go

@@ -0,0 +1,448 @@
+package marshal
+
+import (
+	"errors"
+	"strconv"
+)
+
+type MarshalledObject struct {
+	MajorVersion byte
+	MinorVersion byte
+
+	data         []byte
+  symbolCache  *[]string
+  objectCache  *[]*MarshalledObject
+  size         int
+}
+
+type marshalledObjectType byte
+
+var TypeMismatch = errors.New("gorails/marshal: an attempt to implicitly typecast a marshalled object")
+var IncompleteData = errors.New("gorails/marshal: incomplete data")
+
+const (
+	TYPE_UNKNOWN marshalledObjectType = 0
+	TYPE_NIL     marshalledObjectType = 1
+	TYPE_BOOL    marshalledObjectType = 2
+	TYPE_INTEGER marshalledObjectType = 3
+	TYPE_FLOAT   marshalledObjectType = 4
+	TYPE_STRING  marshalledObjectType = 5
+	TYPE_ARRAY   marshalledObjectType = 6
+	TYPE_MAP     marshalledObjectType = 7
+)
+
+func newMarshalledObject(major_version, minor_version byte, data []byte, symbolCache *[]string, objectCache *[]*MarshalledObject) *MarshalledObject {
+	return newMarshalledObjectWithSize(major_version, minor_version, data, len(data), symbolCache, objectCache)
+}
+
+func newMarshalledObjectWithSize(major_version, minor_version byte, data []byte, size int, symbolCache *[]string, objectCache *[]*MarshalledObject) *MarshalledObject {
+	return &(MarshalledObject{major_version, minor_version, data, symbolCache, objectCache, size})
+}
+
+func CreateMarshalledObject(serialized_data []byte) *MarshalledObject {
+	symbolCache := []string{}
+	objectCache := []*MarshalledObject{}
+	return newMarshalledObject(serialized_data[0], serialized_data[1], serialized_data[2:], &symbolCache, &objectCache)
+}
+
+func (obj *MarshalledObject) GetType() marshalledObjectType {
+	if len(obj.data) == 0 {
+		return TYPE_UNKNOWN
+	}
+
+	if ref := obj.resolveObjectLink(); ref != nil {
+		return ref.GetType()
+	}
+
+	switch obj.data[0] {
+	case '0':
+		return TYPE_NIL
+	case 'T', 'F':
+		return TYPE_BOOL
+	case 'i':
+		return TYPE_INTEGER
+	case 'f':
+		return TYPE_FLOAT
+	case ':', ';':
+		return TYPE_STRING
+	case 'I':
+		if len(obj.data) > 1 && obj.data[1] == '"' {
+			return TYPE_STRING
+		}
+	case '[':
+		return TYPE_ARRAY
+	case '{':
+		return TYPE_MAP
+	}
+
+	return TYPE_UNKNOWN
+}
+
+func (obj *MarshalledObject) GetAsBool() (value bool, err error) {
+	err = assertType(obj, TYPE_BOOL)
+	if err != nil {
+		return
+	}
+
+	value, _ = parseBool(obj.data)
+
+	return
+}
+
+func (obj *MarshalledObject) GetAsInteger() (value int64, err error) {
+	err = assertType(obj, TYPE_INTEGER)
+	if err != nil {
+		return
+	}
+
+	value, _ = parseInt(obj.data[1:])
+
+	return
+}
+
+func (obj *MarshalledObject) GetAsFloat() (value float64, err error) {
+	err = assertType(obj, TYPE_FLOAT)
+	if err != nil {
+		return
+	}
+
+	str, _ := parseString(obj.data[1:])
+	value, err = strconv.ParseFloat(str, 64)
+
+	return
+}
+
+func (obj *MarshalledObject) GetAsString() (value string, err error) {
+	if ref := obj.resolveObjectLink(); ref != nil {
+		return ref.GetAsString()
+	}
+
+	err = assertType(obj, TYPE_STRING)
+	if err != nil {
+		return
+	}
+
+	obj.cacheObject(obj)
+
+	var cache []string
+  if obj.data[0] == ':' {
+		value, _ = parseString(obj.data[1:])
+		obj.cacheSymbols(value)
+  } else if obj.data[0] == ';' {
+  	ref_index, _ := parseInt(obj.data[1:])
+    cache := *(obj.symbolCache)
+    value = cache[ref_index]
+	} else {
+		value, _, cache = parseStringWithEncoding(obj.data[2:])
+		obj.cacheSymbols(cache...)
+	}
+
+	return
+}
+
+func (obj *MarshalledObject) GetAsArray() (value []*MarshalledObject, err error) {
+	if ref := obj.resolveObjectLink(); ref != nil {
+		return ref.GetAsArray()
+	}
+
+	err = assertType(obj, TYPE_ARRAY)
+	if err != nil {
+		return
+	}
+
+	obj.cacheObject(obj)
+
+	array_size, offset := parseInt(obj.data[1:])
+  offset += 1
+
+	value = make([]*MarshalledObject, array_size)
+	for i := int64(0); i < array_size; i++ {
+		value_size := newMarshalledObjectWithSize(
+			obj.MajorVersion,
+			obj.MinorVersion,
+			obj.data[offset:],
+			0,
+      obj.symbolCache,
+      obj.objectCache,
+		).getSize()
+
+		value[i] = newMarshalledObject(
+			obj.MajorVersion,
+			obj.MinorVersion,
+			obj.data[offset:offset+value_size],
+      obj.symbolCache,
+      obj.objectCache,
+		)
+		obj.cacheObject(value[i])
+		offset += value_size
+	}
+
+	obj.size = offset
+
+	return
+}
+
+func (obj *MarshalledObject) GetAsMap() (value map[string]*MarshalledObject, err error) {
+	if ref := obj.resolveObjectLink(); ref != nil {
+		return ref.GetAsMap()
+	}
+
+	err = assertType(obj, TYPE_MAP)
+	if err != nil {
+		return
+	}
+
+	obj.cacheObject(obj)
+
+	map_size, offset := parseInt(obj.data[1:])
+	offset += 1
+
+	value = make(map[string]*MarshalledObject, map_size)
+	for i := int64(0); i < map_size; i++ {
+		k := newMarshalledObject(
+			obj.MajorVersion,
+			obj.MinorVersion,
+			obj.data[offset:],
+      obj.symbolCache,
+      obj.objectCache,
+		)
+		obj.cacheObject(k)
+		offset += k.getSize()
+
+		value_size := newMarshalledObjectWithSize(
+			obj.MajorVersion,
+			obj.MinorVersion,
+			obj.data[offset:],
+			0,
+      obj.symbolCache,
+      obj.objectCache,
+		).getSize()
+
+		v := newMarshalledObject(
+			obj.MajorVersion,
+			obj.MinorVersion,
+			obj.data[offset:offset+value_size],
+      obj.symbolCache,
+      obj.objectCache,
+		)
+		obj.cacheObject(v)
+		value[k.ToString()] = v
+
+		offset += value_size
+	}
+
+	obj.size = offset
+
+	return
+}
+
+func assertType(obj *MarshalledObject, expected_type marshalledObjectType) (err error) {
+	if obj.GetType() != expected_type {
+		err = TypeMismatch
+	}
+
+	return
+}
+
+func (obj *MarshalledObject) getSize() int {
+	header_size, data_size := 0, 0
+
+	if len(obj.data) > 0 && obj.data[0] == '@' {
+		header_size = 1
+		_, data_size = parseInt(obj.data[1:])
+		return header_size + data_size
+	}
+
+	switch obj.GetType() {
+	case TYPE_NIL, TYPE_BOOL:
+		header_size = 0
+		data_size   = 1
+	case TYPE_INTEGER:
+		header_size = 1
+		_, data_size = parseInt(obj.data[header_size:])
+	case TYPE_STRING, TYPE_FLOAT:
+		header_size = 1
+
+		if obj.data[0] == ';' {
+			_, data_size = parseInt(obj.data[header_size:])
+		} else {
+			var cache []string
+
+			if obj.data[0] == 'I' {
+				header_size += 1
+				_, data_size, cache = parseStringWithEncoding(obj.data[header_size:])
+				obj.cacheSymbols(cache...)
+			} else {
+				var symbol string
+				symbol, data_size = parseString(obj.data[header_size:])
+				obj.cacheSymbols(symbol)
+			}
+		}
+	case TYPE_ARRAY:
+		if obj.size == 0 {
+			obj.GetAsArray()
+		}
+
+		return obj.size
+	case TYPE_MAP:
+		if obj.size == 0 {
+			obj.GetAsMap()
+		}
+
+		return obj.size
+	}
+
+	return header_size + data_size
+}
+
+func (obj *MarshalledObject) cacheSymbols(symbols ...string) {
+	if len(symbols) == 0 {
+		return
+	}
+
+	cache := *(obj.symbolCache)
+
+	known := make(map[string]struct{})
+	for _, symbol := range cache {
+		known[symbol] = struct{}{}
+	}
+
+	for _, symbol := range symbols {
+		_, exists := known[symbol]
+
+		if ! exists {
+			cache = append(cache, symbol)
+		}
+	}
+
+	*(obj.symbolCache) = cache
+}
+
+func (obj *MarshalledObject) cacheObject(object *MarshalledObject) {
+	if len(object.data) > 0 && (object.data[0] == '@' || object.data[0] == ':' || object.data[0] == ';') {
+		return
+	}
+	if t := obj.GetType(); !(t == TYPE_STRING || t == TYPE_ARRAY || t == TYPE_MAP) {
+		return
+	}
+
+	cache := *(obj.objectCache)
+
+	for _, o := range cache {
+		if object == o {
+			return
+		}
+	}
+	cache = append(cache, object)
+
+	*(obj.objectCache) = cache
+}
+
+func (obj *MarshalledObject) ToString() (str string) {
+	switch obj.GetType() {
+	case TYPE_NIL:
+		str = "<nil>"
+	case TYPE_BOOL:
+		v, _ := obj.GetAsBool()
+
+		if v {
+			str = "true"
+		} else {
+			str = "false"
+		}
+	case TYPE_INTEGER:
+		v, _ := obj.GetAsInteger()
+		str = strconv.FormatInt(v, 10)
+	case TYPE_STRING:
+		str, _ = obj.GetAsString()
+	case TYPE_FLOAT:
+		v, _ := obj.GetAsFloat()
+		str = strconv.FormatFloat(v, 'f', -1, 64)
+	}
+
+	return
+}
+
+func (obj *MarshalledObject) resolveObjectLink() *MarshalledObject {
+	if len(obj.data) > 0 && obj.data[0] == '@' {
+		idx, _ := parseInt(obj.data[1:])
+		cache := *(obj.objectCache)
+
+		if int(idx) < len(cache) {
+			return cache[idx]
+		}
+	}
+
+	return nil
+}
+
+func parseBool(data []byte) (bool, int) {
+	return data[0] == 'T', 1
+}
+
+func parseInt(data []byte) (int64, int) {
+	if data[0] > 0x05 && data[0] < 0xfb {
+		value := int64(data[0])
+
+		if value > 0x7f {
+			return -(0xff ^ value + 1) + 5, 1
+		} else {
+			return value - 5, 1
+		}
+	} else if data[0] <= 0x05 {
+		value := int64(0)
+		i := data[0]
+
+		for ; i > 0; i-- {
+			value = value<<8 + int64(data[i])
+		}
+
+		return value, int(data[0] + 1)
+	} else {
+		value := int64(0)
+		i := 0xff - data[0] + 1
+
+		for ; i > 0; i-- {
+			value = value<<8 + (0xff - int64(data[i]))
+		}
+
+		return -(value + 1), int(0xff - data[0] + 2)
+	}
+}
+
+func parseString(data []byte) (string, int) {
+	length, header_size := parseInt(data)
+	size := int(length) + header_size
+
+  return string(data[header_size : size]), size
+}
+
+func parseStringWithEncoding(data []byte) (string, int, []string) {
+	cache := make([]string, 0)
+  value, size := parseString(data)
+
+  if len(data) > size+1 && (data[size+1] == ':' || data[size+1] == ';') {
+    if data[size+1] == ';' {
+      _, enc_size := parseInt(data[size+2:])
+      size += enc_size + 1
+    } else {
+      enc_symbol, enc_size := parseString(data[size+2:])
+      size += enc_size + 1
+      cache = append(cache, enc_symbol)
+    }
+
+    if data[size+1] == '"' {
+      encoding, enc_name_size := parseString(data[size+2:])
+      _ = encoding
+      size += enc_name_size + 1
+		} else {
+			_, enc_name_size := parseBool(data[size+1:])
+			size += enc_name_size
+		}
+
+		size += 1
+	}
+
+
+	return value, size, cache
+}

+ 419 - 0
go/gopath/src/github.com/adjust/gorails/marshal/marshal_test.go

@@ -0,0 +1,419 @@
+package marshal
+
+import (
+	"testing"
+	"reflect"
+)
+
+func TestCreateMarshalledObject(t *testing.T) {
+	m := CreateMarshalledObject([]byte{4, 8, 1})
+
+	if m.MajorVersion != 4 {
+		t.Errorf("CreateMarshalledObject created an object with Marshal major version set to %v instead of 4", m.MajorVersion)
+	}
+
+	if m.MinorVersion != 8 {
+		t.Errorf("CreateMarshalledObject created an object with Marshal minor version set to %v instead of 8", m.MinorVersion)
+	}
+}
+
// getTypeTestCase pairs a raw Marshal byte stream with the object type
// GetType() is expected to report for it.
type getTypeTestCase struct {
	Data        []byte
	Expectation marshalledObjectType
}

// TestGetType checks type detection for every supported Marshal tag:
// nil, booleans, integers, floats, strings/symbols, arrays and hashes.
func TestGetType(t *testing.T) {
	// Indexed by marshalledObjectType so failures print a readable name.
	marshalledObjectTypeNames := []string{"unknown", "nil", "bool", "integer", "float", "string", "array", "map"}

	tests := []getTypeTestCase{
		// Nil
		{[]byte{4, 8, 48}, TYPE_NIL},
		// Booleans
		{[]byte{4, 8, 70}, TYPE_BOOL}, // false
		{[]byte{4, 8, 84}, TYPE_BOOL}, // true
		// Integers
		{[]byte{4, 8, 105, 0}, TYPE_INTEGER},                 // 0
		{[]byte{4, 8, 105, 6}, TYPE_INTEGER},                 // 1
		{[]byte{4, 8, 105, 250}, TYPE_INTEGER},               // -1
		{[]byte{4, 8, 105, 3, 64, 226, 1}, TYPE_INTEGER},     // 123456
		{[]byte{4, 8, 105, 253, 192, 29, 254}, TYPE_INTEGER}, // -123456
		// Floats
		{[]byte{4, 8, 102, 6, 48}, TYPE_FLOAT},                               // 0.0
		{[]byte{4, 8, 102, 8, 49, 46, 53}, TYPE_FLOAT},                       // 1.5
		{[]byte{4, 8, 102, 9, 45, 49, 46, 53}, TYPE_FLOAT},                   // -1.5
		{[]byte{4, 8, 102, 12, 49, 46, 50, 53, 101, 51, 48}, TYPE_FLOAT},     // 1.25e30
		{[]byte{4, 8, 102, 13, 49, 46, 50, 53, 101, 45, 51, 48}, TYPE_FLOAT}, // 1.25e-30
		// Strings
		{[]byte{4, 8, 73, 34, 0, 6, 58, 6, 69, 84}, TYPE_STRING},                                                           // ''
		{[]byte{4, 8, 58, 10, 104, 101, 108, 108, 111}, TYPE_STRING},                                                       // :hello
		{[]byte{4, 8, 73, 34, 17, 72, 101, 108, 108, 111, 44, 32, 119, 111, 114, 108, 100, 6, 58, 6, 69, 84}, TYPE_STRING}, // 'Hello, world'
		// Arrays
		{[]byte{4, 8, 91, 0}, TYPE_ARRAY},                                             // []
		{[]byte{4, 8, 91, 6, 73, 34, 8, 102, 111, 111, 6, 58, 6, 69, 84}, TYPE_ARRAY}, // ["foo"]
		// Maps (Ruby hashes)
		{[]byte{4, 8, 123, 0}, TYPE_MAP},                                                                 // {}
		{[]byte{4, 8, 123, 6, 58, 8, 102, 111, 111, 73, 34, 8, 98, 97, 114, 6, 58, 6, 69, 84}, TYPE_MAP}, // {foo: "bar"}
	}

	for _, testCase := range tests {
		object_type := CreateMarshalledObject(testCase.Data).GetType()
		if object_type != testCase.Expectation {
			t.Errorf("GetType() returned '%v' instead of '%v'", marshalledObjectTypeNames[int(object_type)], marshalledObjectTypeNames[testCase.Expectation])
		}
	}
}
+
// getAsBoolTestCase pairs a Marshal byte stream with the boolean value it
// should decode to.
type getAsBoolTestCase struct {
	Data        []byte
	Expectation bool
}

// TestGetAsBool decodes the 'F'/'T' Marshal tags and also checks that a
// non-boolean value (nil) is rejected with an error.
func TestGetAsBool(t *testing.T) {
	tests := []getAsBoolTestCase{
		{[]byte{4, 8, 70}, false},
		{[]byte{4, 8, 84}, true},
	}

	value, err := CreateMarshalledObject([]byte{4, 8, 48}).GetAsBool() // should return an error
	if err == nil {
		t.Error("GetAsBool() returned no error when attempted to typecast nil to boolean")
	}

	for _, testCase := range tests {
		value, err = CreateMarshalledObject(testCase.Data).GetAsBool()

		if err != nil {
			t.Errorf("GetAsBool() returned an error: '%v' for %v", err.Error(), testCase.Expectation)
		}

		if value != testCase.Expectation {
			t.Errorf("GetAsBool() returned '%v' instead of '%v'", value, testCase.Expectation)
		}
	}
}
+
// getAsIntegerTestCase pairs a Marshal byte stream with the integer value
// it should decode to.
type getAsIntegerTestCase struct {
	Data        []byte
	Expectation int64
}

// TestGetAsInteger covers all Fixnum encodings: single-byte positives and
// negatives, and multi-byte little-endian forms up to the 30-bit extremes.
// It also checks that a non-integer value (nil) is rejected with an error.
func TestGetAsInteger(t *testing.T) {
	tests := []getAsIntegerTestCase{
		{[]byte{4, 8, 0x69, 0x00}, 0},
		{[]byte{4, 8, 0x69, 0x06}, 1},
		{[]byte{4, 8, 0x69, 0x7f}, 122},
		{[]byte{4, 8, 0x69, 0x01, 0x7b}, 123},
		{[]byte{4, 8, 0x69, 0x02, 0x00, 0x01}, 256},
		{[]byte{4, 8, 0x69, 0x04, 0xff, 0xff, 0xff, 0x3f}, (2 << 29) - 1},
		{[]byte{4, 8, 0x69, 0xfa}, -1},
		{[]byte{4, 8, 0x69, 0xff, 0x84}, -124},
		{[]byte{4, 8, 0x69, 0xfe, 0xff, 0xfe}, -257},
		{[]byte{4, 8, 0x69, 0xfc, 0x00, 0x00, 0x00, 0xc0}, -(2 << 29)},
	}

	value, err := CreateMarshalledObject([]byte{4, 8, 48}).GetAsInteger() // should return an error
	if err == nil {
		t.Error("GetAsInteger() returned no error when attempted to typecast nil to int")
	}

	for _, testCase := range tests {
		value, err = CreateMarshalledObject(testCase.Data).GetAsInteger()

		if err != nil {
			t.Errorf("GetAsInteger() returned an error: '%v' for %v", err.Error(), testCase.Expectation)
		}

		if value != testCase.Expectation {
			t.Errorf("GetAsInteger() returned '%v' instead of '%v'", value, testCase.Expectation)
		}
	}
}
+
// getAsFloatTestCase pairs a Marshal byte stream with the float value it
// should decode to.
type getAsFloatTestCase struct {
	Data        []byte
	Expectation float64
}

// TestGetAsFloat decodes Marshal floats in plain and exponent notation
// (positive and negative), and checks that a non-float value (nil) is
// rejected with an error.
func TestGetAsFloat(t *testing.T) {
	tests := []getAsFloatTestCase{
		{[]byte{4, 8, 102, 6, 48}, 0.0},
		{[]byte{4, 8, 102, 13, 49, 46, 52, 51, 101, 45, 49, 48}, 1.43e-10},
		{[]byte{4, 8, 102, 13, 49, 46, 52, 51, 101, 45, 49, 48}, 1.43E-10},
		{[]byte{4, 8, 102, 10, 48, 46, 49, 50, 53}, 0.125},
		{[]byte{4, 8, 102, 10, 49, 50, 46, 53, 54}, 12.56},
		{[]byte{4, 8, 102, 12, 49, 46, 52, 51, 101, 49, 48}, 1.43e+10},
		{[]byte{4, 8, 102, 12, 49, 46, 52, 51, 101, 49, 48}, 1.43E+10},
		{[]byte{4, 8, 102, 14, 45, 49, 46, 52, 51, 101, 45, 49, 48}, -1.43e-10},
		{[]byte{4, 8, 102, 14, 45, 49, 46, 52, 51, 101, 45, 49, 48}, -1.43E-10},
		{[]byte{4, 8, 102, 11, 45, 48, 46, 49, 50, 53}, -0.125},
		{[]byte{4, 8, 102, 11, 45, 49, 50, 46, 53, 54}, -12.56},
		{[]byte{4, 8, 102, 13, 45, 49, 46, 52, 51, 101, 49, 48}, -1.43e+10},
		{[]byte{4, 8, 102, 13, 45, 49, 46, 52, 51, 101, 49, 48}, -1.43E+10},
	}

	value, err := CreateMarshalledObject([]byte{4, 8, 48}).GetAsFloat() // should return an error
	if err == nil {
		t.Error("GetAsFloat() returned no error when attempted to typecast nil to float")
	}

	for _, testCase := range tests {
		value, err = CreateMarshalledObject(testCase.Data).GetAsFloat()

		if err != nil {
			t.Errorf("GetAsFloat() returned an error: '%v' for %v", err.Error(), testCase.Expectation)
		}

		if value != testCase.Expectation {
			t.Errorf("GetAsFloat() returned '%v' instead of '%v'", value, testCase.Expectation)
		}
	}
}
+
// getAsStringTestCase pairs a Marshal byte stream with the string it
// should decode to.
type getAsStringTestCase struct {
	Data        []byte
	Expectation string
}

// TestGetAsString decodes Marshal strings (with encoding annotation) and
// symbols, and checks that a non-string value (nil) is rejected with an
// error.
func TestGetAsString(t *testing.T) {
	tests := []getAsStringTestCase{
		{[]byte{4, 8, 73, 34, 0, 6, 58, 6, 69, 84}, ""},                                                                       // ''
		{[]byte{4, 8, 58, 10, 104, 101, 108, 108, 111}, "hello"},                                                              // :hello
		{[]byte{4, 8, 73, 34, 17, 72, 101, 108, 108, 111, 44, 32, 119, 111, 114, 108, 100, 6, 58, 6, 69, 84}, "Hello, world"}, // 'Hello, world'
	}

	value, err := CreateMarshalledObject([]byte{4, 8, 48}).GetAsString() // should return an error
	if err == nil {
		t.Error("GetAsString() returned no error when attempted to typecast nil to string")
	}

	for _, testCase := range tests {
		value, err = CreateMarshalledObject(testCase.Data).GetAsString()

		if err != nil {
			t.Errorf("GetAsString() returned an error: '%v' for %v", err.Error(), testCase.Expectation)
		}

		if value != testCase.Expectation {
			t.Errorf("GetAsString() returned '%v' instead of '%v'", value, testCase.Expectation)
		}
	}
}

// getAsArrayOfIntsTestCase pairs a Marshal array stream with the expected
// decoded integer elements.
type getAsArrayOfIntsTestCase struct {
	Data        []byte
	Expectation []int64
}

// getAsArrayOfStringsTestCase pairs a Marshal array stream with the
// expected decoded string elements.
type getAsArrayOfStringsTestCase struct {
	Data        []byte
	Expectation []string
}
+
+func TestGetAsArray(t *testing.T) {
+	int_tests := []getAsArrayOfIntsTestCase{
+		{[]byte{4, 8, 91, 0}, []int64{}},
+		{[]byte{4, 8, 91, 10, 105, 255, 0, 105, 250, 105, 0, 105, 6, 105, 2, 0, 1}, []int64{-256, -1, 0, 1, 256}},
+	}
+
+	_, err := CreateMarshalledObject([]byte{4, 8, 48}).GetAsArray() // should return an error
+	if err == nil {
+		t.Error("GetAsArray() returned no error when attempted to typecast nil to array")
+	}
+
+	for _, testCase := range int_tests {
+		value, err := CreateMarshalledObject(testCase.Data).GetAsArray()
+
+		if err != nil {
+			t.Errorf("GetAsArray() returned an error: '%v' for %v", err.Error(), testCase.Expectation)
+		}
+
+		if len(value) != len(testCase.Expectation) {
+			t.Error("GetAsArray() returned an array with length %d for %v", len(value), testCase.Expectation)
+		} else {
+			for i, v := range value {
+				value, err := v.GetAsInteger()
+
+				if err != nil {
+					t.Error("GetAsArray() returned an error '%v' for element #%d (%d) of %v", err.Error(), i, testCase.Expectation[i], testCase.Expectation)
+				}
+
+				if value != testCase.Expectation[i] {
+					t.Errorf("GetAsArray() returned '%v' instead of '%v'", value, testCase.Expectation)
+				}
+			}
+		}
+	}
+
+	string_tests := []getAsArrayOfStringsTestCase{
+		{[]byte{4, 8, 91, 6, 73, 34, 8, 102, 111, 111, 6, 58, 6, 69, 84}, []string{"foo"}}, // ["foo"]
+		{[]byte{4, 8, 91, 6, 58, 8, 98, 97, 114}, []string{"bar"}}, // [:bar]
+		{[]byte{4, 8, 91, 8, 73, 34, 8, 102, 111, 111, 6, 58, 6, 69, 84, 73, 34, 8, 98, 97, 114, 6, 59, 0, 84, 58, 8, 98, 97, 122}, []string{"foo", "bar", "baz"}}, // ["foo", "bar", :baz]
+		{[]byte{4, 8, 91, 8, 73, 34, 8, 102, 111, 111, 6, 58, 6, 69, 84, 73, 34, 8, 98, 97, 114, 6, 58, 13, 101, 110, 99, 111, 100, 105, 110, 103, 34, 14, 83, 104, 105, 102, 116, 95, 74, 73, 83, 58, 8, 98, 97, 122}, []string{"foo", "bar", "baz"}}, // ["foo", "bar".force_encoding("SHIFT_JIS"), :baz]
+		{[]byte{4, 8, 91, 7, 73, 34, 6, 120, 6, 58, 6, 69, 84, 64, 6}, []string{"x", "x"}},
+	}
+
+	for _, testCase := range string_tests {
+		value, err := CreateMarshalledObject(testCase.Data).GetAsArray()
+
+		if err != nil {
+			t.Errorf("GetAsArray() returned an error: '%v' for %v", err.Error(), testCase.Expectation)
+		}
+
+		if len(value) != len(testCase.Expectation) {
+			t.Errorf("GetAsArray() returned an array with length %d for %v", len(value), testCase.Expectation)
+		} else {
+			for i, v := range value {
+				value, err := v.GetAsString()
+
+				if err != nil {
+					t.Errorf("GetAsArray() returned an error '%v' for element #%d (%v %d) of %v", err.Error(), i, testCase.Expectation[i], v.GetType(), testCase.Expectation)
+				}
+
+				if value != testCase.Expectation[i] {
+					t.Errorf("GetAsArray() returned '%v' instead of '%v'", value, testCase.Expectation)
+				}
+			}
+		}
+	}
+}
+
// getAsMapOfIntsTestCase pairs a Marshal hash stream with the expected
// string -> int64 decoding.
type getAsMapOfIntsTestCase struct {
	Data        []byte
	Expectation map[string]int64
}

// getAsMapOfStringsTestCase pairs a Marshal hash stream with the expected
// string -> string decoding.
type getAsMapOfStringsTestCase struct {
	Data        []byte
	Expectation map[string]string
}

// getAsMapOfMapsTestCase pairs a Marshal hash stream with the expected
// nested string -> (string -> int64) decoding.
type getAsMapOfMapsTestCase struct {
	Data        []byte
	Expectation map[string]map[string]int64
}
+
+func TestGetAsMap(t *testing.T) {
+	int_tests := []getAsMapOfIntsTestCase{
+		{
+			[]byte{4, 8, 123, 0},
+			map[string]int64{},
+		},
+		{
+			[]byte{4, 8, 123, 12, 73, 34, 6, 48, 6, 58, 6, 69, 84, 105, 0, 105, 6, 105, 6, 105, 250, 105, 250, 48, 105, 255, 0, 73, 34, 8, 102, 111, 111, 6, 59, 0, 84, 105, 2, 0, 1, 73, 34, 8, 98, 97, 114, 6, 58, 13, 101, 110, 99, 111, 100, 105, 110, 103, 34, 14, 83, 104, 105, 102, 116, 95, 74, 73, 83, 105, 2, 188, 2, 58, 8, 98, 97, 122, 105, 254, 68, 253},
+			map[string]int64{
+				"0":     0,
+				"1":     1,
+				"-1":    -1,
+				"<nil>": -256,
+				"foo":   256,
+				"bar":   700,
+				"baz":   -700,
+			},
+		},
+	}
+
+	_, err := CreateMarshalledObject([]byte{4, 8, 48}).GetAsMap() // should return an error
+	if err == nil {
+		t.Error("GetAsMap() returned no error when attempted to typecast nil to map")
+	}
+
+	for _, testCase := range int_tests {
+		value, err := CreateMarshalledObject(testCase.Data).GetAsMap()
+
+		m := make(map[string]int64)
+		for k, v := range value {
+			m[k], err = v.GetAsInteger()
+
+			if err != nil {
+				t.Errorf("GetAsMap() returned an error while parsing %s", k)
+			}
+		}
+
+		if ! reflect.DeepEqual(m, testCase.Expectation) {
+			t.Errorf("%v is not equal %v", m, testCase.Expectation)
+		}
+	}
+
+	string_tests := []getAsMapOfStringsTestCase{
+		{
+			[]byte{4, 8, 123, 12, 73, 34, 6, 48, 6, 58, 6, 69, 84, 73, 34, 6, 48, 6, 59, 0, 84, 105, 6, 73, 34, 6, 49, 6, 59, 0, 84, 105, 250, 73, 34, 0, 6, 59, 0, 84, 48, 73, 34, 8, 102, 111, 111, 6, 59, 0, 84, 73, 34, 8, 102, 111, 111, 6, 59, 0, 84, 73, 34, 8, 98, 97, 114, 6, 58, 13, 101, 110, 99, 111, 100, 105, 110, 103, 34, 14, 83, 104, 105, 102, 116, 95, 74, 73, 83, 73, 34, 8, 98, 97, 114, 6, 59, 0, 84, 58, 8, 98, 97, 122, 59, 7, 73, 34, 6, 48, 6, 59, 0, 84},
+			map[string]string{
+				"0":     "0",   // "0" => "0"
+				"1":     "1",   // 1 => "1"
+				"-1":    "",    // -1 => ""
+				"<nil>": "foo", // nil => "foo"
+				"foo":   "bar", // "foo" => "bar".force_encoding("SHIFT_JIS")
+				"bar":   "baz", // "bar".force_encoding("SHIFT_JIS") => :baz
+				"baz":   "0",   // :baz => "0"
+			},
+		},
+		{
+			[]byte{4, 8, 123, 8, 58, 6, 97, 73, 34, 6, 120, 6, 58, 6, 69, 84, 58, 6, 98, 64, 6, 58, 6, 99, 64, 6},
+			map[string]string{
+				"a":  "x",
+				"b":  "x",
+				"c":  "x",
+			},
+		},
+	}
+
+	for _, testCase := range string_tests {
+		value, err := CreateMarshalledObject(testCase.Data).GetAsMap()
+
+		m := make(map[string]string)
+		for k, v := range value {
+			m[k], err = v.GetAsString()
+
+			if err != nil {
+				t.Errorf("GetAsMap() returned an error while parsing %s %d: %s", k, v.GetType(), err.Error())
+			}
+		}
+
+		if ! reflect.DeepEqual(m, testCase.Expectation) {
+			t.Errorf("%v is not equal %v", m, testCase.Expectation)
+		}
+	}
+
+	map_tests := []getAsMapOfMapsTestCase{
+		{
+			[]byte{4, 8, 123, 8, 58, 6, 97, 123, 6, 73, 34, 6, 120, 6, 58, 6, 69, 84, 105, 6, 58, 6, 98, 64, 6, 58, 6, 99, 64, 6},
+			map[string]map[string]int64{
+				"a":  map[string]int64{"x": 1},
+				"b":  map[string]int64{"x": 1},
+				"c":  map[string]int64{"x": 1},
+			},
+		},
+	}
+
+	for _, testCase := range map_tests {
+		value, _ := CreateMarshalledObject(testCase.Data).GetAsMap()
+
+		m := make(map[string]map[string]int64)
+		for k, v := range value {
+			vv, err := v.GetAsMap()
+
+			if err != nil {
+				t.Errorf("GetAsMap() returned an error while parsing %s", v)
+			}
+
+			m2 := make(map[string]int64)
+			for k2, v2 := range vv {
+				m2[k2], err = v2.GetAsInteger()
+
+				if err != nil {
+					t.Errorf("GetAsInteger() returned an error while parsing %s", v2)
+				}
+			}
+
+			m[k] = m2
+
+			if err != nil {
+				t.Errorf("GetAsMap() returned an error while parsing %s %d: %s", k, v.GetType(), err.Error())
+			}
+		}
+
+		if ! reflect.DeepEqual(m, testCase.Expectation) {
+			t.Errorf("%v is not equal %v", m, testCase.Expectation)
+		}
+	}
+}

+ 35 - 0
go/gopath/src/github.com/adjust/gorails/session/README.md

@@ -0,0 +1,35 @@
+gorails/session
+===============
+
+[![Build Status](https://travis-ci.org/adjust/gorails.png)](https://travis-ci.org/adjust/gorails)
+
+## Installation
+
+With Go and git installed:
+
+```
+go get -u code.google.com/p/go.crypto/pbkdf2
+go get -u github.com/adjust/gorails/session
+```
+
+Or you can use [Goem](http://big-elephants.com/2013-09/goem-the-missing-go-extension-manager/).
+
+## Usage
+
+```go
+import "github.com/adjust/gorails/session"
+
+// session_cookie - raw _<your app name>_session cookie
+func getRailsSessionData(session_cookie string) (decrypted_cookie_data []byte, err error) {
+  decrypted_cookie_data, err = session.DecryptSignedCookie(session_cookie, secret_key_base, salt)
+
+  return
+}
+
+const (
+  secret_key_base = "..." // can be found in config/initializers/secret_token.rb
+  salt = "encrypted cookie" // default value for Rails 4 app
+)
+```
+
+After you decrypted session data you might like to deserialize it using [gorails/marshal](https://github.com/adjust/gorails/tree/master/marshal)

+ 73 - 0
go/gopath/src/github.com/adjust/gorails/session/session.go

@@ -0,0 +1,73 @@
+package session
+
import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/sha1"
	"encoding/base64"
	"errors"
	"net/url"
	"strings"

	"code.google.com/p/go.crypto/pbkdf2"
)
+
// generateSecret derives the cookie-encryption key from the Rails
// secret_key_base and salt using PBKDF2-HMAC-SHA1 with the iteration count
// and key size declared below (the "Rails 4.0 defaults" constants);
// presumably this mirrors Rails' ActiveSupport key generator — confirm
// against the target Rails version.
func generateSecret(base, salt string) []byte {
	return pbkdf2.Key([]byte(base), []byte(salt), key_iter_num, key_size, sha1.New)
}
+
// decodeCookieData splits a raw cookie payload of the form
// "<base64 ciphertext>--<base64 iv>" and base64-decodes both parts.
//
// Fix: the old code indexed vectors[1] unconditionally and panicked with
// an index-out-of-range when the "--" separator was missing; it now
// returns an error instead.
func decodeCookieData(cookie []byte) (data, iv []byte, err error) {
	vectors := strings.SplitN(string(cookie), "--", 2)
	if len(vectors) != 2 {
		err = errors.New("session: malformed cookie, expected \"data--iv\"")
		return
	}

	data, err = base64.StdEncoding.DecodeString(vectors[0])
	if err != nil {
		return
	}

	iv, err = base64.StdEncoding.DecodeString(vectors[1])
	if err != nil {
		return
	}

	return
}
+
+func decryptCookie(cookie []byte, secret []byte) (dd []byte, err error) {
+	data, iv, err := decodeCookieData(cookie)
+
+	c, err := aes.NewCipher(secret[:32])
+	if err != nil {
+		return
+	}
+
+	cfb := cipher.NewCBCDecrypter(c, iv)
+	dd = make([]byte, len(data))
+	cfb.CryptBlocks(dd, data)
+
+	return
+}
+
+func DecryptSignedCookie(signed_cookie, secret_key_base, salt string) (session []byte, err error) {
+	cookie, err := url.QueryUnescape(signed_cookie)
+	if err != nil {
+		return
+	}
+
+	vectors := strings.SplitN(cookie, "--", 2)
+	data, err := base64.StdEncoding.DecodeString(vectors[0])
+	if err != nil {
+		return
+	}
+
+	session, err = decryptCookie(data, generateSecret(secret_key_base, salt))
+	if err != nil {
+		return
+	}
+
+	return
+}
+
// Rails 4.0 defaults
const (
	key_iter_num = 1000 // PBKDF2 iteration count
	key_size     = 64   // derived key length in bytes; decryptCookie uses the first 32 as the AES key
)

+ 30 - 0
go/gopath/src/github.com/alidayu/README.md

@@ -0,0 +1,30 @@
+# alidayu
+阿里大鱼Go语言开发包。One Golang package for alidayu service. 
+
+> 阿里大鱼API说明文档:[http://open.taobao.com/doc2/apiDetail.htm?spm=0.0.0.0.bkKKhG&apiId=25450](http://open.taobao.com/doc2/apiDetail.htm?spm=0.0.0.0.bkKKhG&apiId=25450)  
+官网:[http://alidayu.com](http://alidayu.com)
+
### 使用方法:
+
+- 发送短信:`alidayu.SendSMS`
+- 文本转语音通知:`alidayu.CallTTS`
+- 发送语音通知:`alidayu.CallVoice`
+- 发起双方通话:`alidayu.CallDouble`
+
+```
+package main
+
+import (
+	"fmt"
+	"github.com/ltt1987/alidayu"
+)
+
+func main() {
+	alidayu.AppKey = "...your AppKey..."
+	alidayu.AppSecret = "...your AppSecret..."
+
+	success, resp := alidayu.SendSMS("18888888888", "身份验证", "SMS_4000328", `{"code":"1234","product":"alidayu"}`)
+	fmt.Println("Success:", success)
+	fmt.Println(resp)
+}
+```

+ 34 - 0
go/gopath/src/github.com/alidayu/body.go

@@ -0,0 +1,34 @@
+package alidayu
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"sort"
+	"strings"
+)
+
+func getRequestBody(m map[string]string) (reader io.Reader, size int64) {
+	var keys []string
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	v := url.Values{}
+
+	signString := AppSecret
+	for _, k := range keys {
+		v.Set(k, m[k])
+		signString += k + m[k]
+	}
+	signString += AppSecret
+
+	signByte := md5.Sum([]byte(signString))
+	sign := strings.ToUpper(fmt.Sprintf("%x", signByte))
+	v.Set("sign", sign)
+
+	return ioutil.NopCloser(strings.NewReader(v.Encode())), int64(len(v.Encode()))
+}

+ 23 - 0
go/gopath/src/github.com/alidayu/call_double.go

@@ -0,0 +1,23 @@
+package alidayu
+
+import "time"
+
+func CallDouble(caller_num, caller_show_num, called_num, called_show_num string) (success bool, response string) {
+	if caller_num == "" || caller_show_num == "" || called_num == "" || called_show_num == "" {
+		return false, "Parameter not complete"
+	}
+
+	params := make(map[string]string)
+	params["app_key"] = AppKey
+	params["format"] = "json"
+	params["method"] = Method_CallVoice
+	params["sign_method"] = "md5"
+	params["timestamp"] = time.Now().Format("2006-01-02 15:04:05")
+	params["v"] = "2.0"
+	params["caller_num"] = caller_num
+	params["caller_show_num"] = caller_show_num
+	params["called_num"] = called_num
+	params["called_show_num"] = called_show_num
+
+	return DoPost(params)
+}

+ 23 - 0
go/gopath/src/github.com/alidayu/call_tts.go

@@ -0,0 +1,23 @@
+package alidayu
+
+import "time"
+
+func CallTTS(called_num, called_show_num, tts_code, tts_param string) (success bool, response string) {
+	if called_num == "" || called_show_num == "" || tts_code == "" {
+		return false, "Parameter not complete"
+	}
+
+	params := make(map[string]string)
+	params["app_key"] = AppKey
+	params["format"] = "json"
+	params["method"] = Method_CallTTS
+	params["sign_method"] = "md5"
+	params["timestamp"] = time.Now().Format("2006-01-02 15:04:05")
+	params["v"] = "2.0"
+	params["called_show_num"] = called_show_num
+	params["called_num"] = called_num
+	params["tts_code"] = tts_code
+	params["tts_param"] = tts_param
+
+	return DoPost(params)
+}

+ 22 - 0
go/gopath/src/github.com/alidayu/call_voice.go

@@ -0,0 +1,22 @@
+package alidayu
+
+import "time"
+
+func CallVoice(called_num, called_show_num, voice_code string) (success bool, response string) {
+	if called_num == "" || called_show_num == "" || voice_code == "" {
+		return false, "Parameter not complete"
+	}
+
+	params := make(map[string]string)
+	params["app_key"] = AppKey
+	params["format"] = "json"
+	params["method"] = Method_CallVoice
+	params["sign_method"] = "md5"
+	params["timestamp"] = time.Now().Format("2006-01-02 15:04:05")
+	params["v"] = "2.0"
+	params["called_show_num"] = called_show_num
+	params["called_num"] = called_num
+	params["voice_code"] = voice_code
+
+	return DoPost(params)
+}

+ 12 - 0
go/gopath/src/github.com/alidayu/const.go

@@ -0,0 +1,12 @@
+package alidayu
+
// TOP gateway endpoint and the API method names used by this package.
const (
	URL               string = "http://gw.api.taobao.com/router/rest"
	Method_SendSMS    string = "alibaba.aliqin.fc.sms.num.send"
	Method_CallTTS    string = "alibaba.aliqin.fc.tts.num.singlecall"
	Method_CallVoice  string = "alibaba.aliqin.fc.voice.num.singlecall"
	Method_CallDouble string = "alibaba.aliqin.fc.voice.num.doublecall"
)

// AppKey and AppSecret identify the application; the caller must set both
// before invoking any API function (DoPost rejects requests otherwise).
var AppKey string
var AppSecret string

+ 34 - 0
go/gopath/src/github.com/alidayu/post.go

@@ -0,0 +1,34 @@
+package alidayu
+
+import (
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+func DoPost(m map[string]string) (success bool, response string) {
+	if AppKey == "" || AppSecret == "" {
+		return false, "AppKey or AppSecret is requierd!"
+	}
+
+	body, size := getRequestBody(m)
+	client := &http.Client{}
+	req, _ := http.NewRequest("POST", URL, body)
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	req.ContentLength = size
+
+	resp, err := client.Do(req)
+	defer resp.Body.Close()
+	if err != nil {
+		response = err.Error()
+		return
+	}
+
+	data, _ := ioutil.ReadAll(resp.Body)
+	response = string(data)
+	if strings.Contains(response, "success") {
+		return true, response
+	} else {
+		return false, response
+	}
+}

+ 27 - 0
go/gopath/src/github.com/alidayu/sms_send.go

@@ -0,0 +1,27 @@
+package alidayu
+
+import (
+	"time"
+)
+
+func SendSMS(rec_num, sms_free_sign_name, sms_template_code, sms_param string) (success bool, response string) {
+	if rec_num == "" || sms_free_sign_name == "" || sms_template_code == "" {
+		return false, "Parameter not complete"
+	}
+
+	params := make(map[string]string)
+	params["app_key"] = AppKey
+	params["format"] = "json"
+	params["method"] = Method_SendSMS
+	params["sign_method"] = "md5"
+	params["timestamp"] = time.Now().Format("2006-01-02 15:04:05")
+	params["v"] = "2.0"
+	params["sms_type"] = "normal"
+	params["sms_free_sign_name"] = sms_free_sign_name
+	params["rec_num"] = rec_num
+	params["sms_template_code"] = sms_template_code
+	params["sms_param"] = sms_param
+
+	return DoPost(params)
+
+}

+ 164 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/README.md

@@ -0,0 +1,164 @@
+# Aliyun OSS SDK for Go
+[![GitHub version](https://badge.fury.io/gh/aliyun%2Faliyun-oss-go-sdk.svg)](https://badge.fury.io/gh/aliyun%2Faliyun-oss-go-sdk)
+[![Build Status](https://travis-ci.org/aliyun/aliyun-oss-go-sdk.svg?branch=master)](https://travis-ci.org/aliyun/aliyun-oss-go-sdk)
+[![Coverage Status](https://coveralls.io/repos/github/aliyun/aliyun-oss-go-sdk/badge.svg?branch=master)](https://coveralls.io/github/aliyun/aliyun-oss-go-sdk?branch=master)
+## 关于
+> - 此Go SDK基于[阿里云对象存储服务](http://www.aliyun.com/product/oss/)官方API构建。
+> - 阿里云对象存储(Object Storage Service,简称OSS),是阿里云对外提供的海量,安全,低成本,高可靠的云存储服务。
+> - OSS适合存放任意文件类型,适合各种网站、开发企业及开发者使用。
+> - 使用此SDK,用户可以方便地在任何应用、任何时间、任何地点上传,下载和管理数据。
+
+## 版本
+> - 当前版本:1.2.0
+
+## 运行环境
+> - 推荐使用Go 1.4及以上。
+
+## 安装方法
+### GitHub安装
+> - 执行命令`go get github.com/aliyun/aliyun-oss-go-sdk/oss`获取远程代码包。
+> - 在您的代码中使用`import "github.com/aliyun/aliyun-oss-go-sdk/oss"`引入OSS Go SDK的包。
+
+## 快速使用
+#### 获取存储空间列表(List Bucket)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    lsRes, err := client.ListBuckets()
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    for _, bucket := range lsRes.Buckets {
+        fmt.Println("Buckets:", bucket.Name)
+    }
+```
+
+#### 创建存储空间(Create Bucket)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    err = client.CreateBucket("my-bucket")
+    if err != nil {
+        // HandleError(err)
+    }
+```
+    
+#### 删除存储空间(Delete Bucket)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    err = client.DeleteBucket("my-bucket")
+    if err != nil {
+        // HandleError(err)
+    }
+```
+
+#### 上传文件(Put Object)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    bucket, err := client.Bucket("my-bucket")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    err = bucket.PutObjectFromFile("my-object", "LocalFile")
+    if err != nil {
+        // HandleError(err)
+    }
+```
+
+#### 下载文件 (Get Object)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    bucket, err := client.Bucket("my-bucket")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    err = bucket.GetObjectToFile("my-object", "LocalFile")
+    if err != nil {
+        // HandleError(err)
+    }
+```
+
+#### 获取文件列表(List Objects)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    bucket, err := client.Bucket("my-bucket")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    lsRes, err := bucket.ListObjects()
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    for _, object := range lsRes.Objects {
+        fmt.Println("Objects:", object.Key)
+    }
+```
+    
+#### 删除文件(Delete Object)
+```go
+    client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    bucket, err := client.Bucket("my-bucket")
+    if err != nil {
+        // HandleError(err)
+    }
+    
+    err = bucket.DeleteObject("my-object")
+    if err != nil {
+        // HandleError(err)
+    }
+```
+
+#### 其它
+更多的示例程序,请参看OSS Go SDK安装路径(即GOPATH变量中的第一个路径)下的`src\github.com\aliyun\aliyun-oss-go-sdk\sample`,该目录下为示例程序,
+或者参看`https://github.com/aliyun/aliyun-oss-go-sdk`下sample目录中的示例文件。
+
+## 注意事项
+### 运行sample
+> - 拷贝示例文件。到OSS Go SDK的安装路径(即GOPATH变量中的第一个路径),进入OSS Go SDK的代码目录`src\github.com\aliyun\aliyun-oss-go-sdk`,
+把其下的sample目录和sample.go复制到您的测试工程src目录下。
+> - 修改sample/config.go里的endpoint、AccessKeyId、AccessKeySecret、BucketName等配置。
+> - 请在您的工程目录下执行`go run src/sample.go`。
+
+## 联系我们
+> - [阿里云OSS官方网站](http://oss.aliyun.com)
+> - [阿里云OSS官方论坛](http://bbs.aliyun.com)
+> - [阿里云OSS官方文档中心](http://www.aliyun.com/product/oss#Docs)
+> - 阿里云官方技术支持:[提交工单](https://workorder.console.aliyun.com/#/ticket/createIndex)
+
+## 作者
+> - Yubin Bai
+> - Hǎiliàng Wáng
+
+## License
+> - Apache License 2.0

+ 92 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go

@@ -0,0 +1,92 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"hash"
+	"io"
+	"net/http"
+	"sort"
+	"strings"
+)
+
// headerSorter holds the "x-oss-" headers as parallel key/value slices so
// they can be sorted by key for signHeader (implements sort.Interface via
// the methods below).
type headerSorter struct {
	Keys []string
	Vals []string
}
+
// signHeader computes the OSS "Authorization" header for the request and
// sets it directly on req. The signature is base64(HMAC-SHA1(secret,
// canonical string)) where the canonical string is the HTTP verb,
// Content-MD5, Content-Type, Date, the sorted lower-cased "x-oss-" headers
// and the canonicalized resource, newline-separated.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
	// Collect every "x-oss-" header (lower-cased key, first value only).
	temp := make(map[string]string)

	for k, v := range req.Header {
		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
			temp[strings.ToLower(k)] = v[0]
		}
	}
	hs := newHeaderSorter(temp)

	// Sort the collected headers by key in ascending order.
	hs.Sort()

	// Build the CanonicalizedOSSHeaders section: "key:value\n" per header.
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}

	// Remaining canonical-string components come from the request headers.
	date := req.Header.Get(HTTPHeaderDate)
	contentType := req.Header.Get(HTTPHeaderContentType)
	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)

	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
	io.WriteString(h, signStr)
	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))

	// Final header value: "OSS <AccessKeyID>:<signature>".
	authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + signedStr

	req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
+
+// Additional function for function SignHeader.
+func newHeaderSorter(m map[string]string) *headerSorter {
+	hs := &headerSorter{
+		Keys: make([]string, 0, len(m)),
+		Vals: make([]string, 0, len(m)),
+	}
+
+	for k, v := range m {
+		hs.Keys = append(hs.Keys, k)
+		hs.Vals = append(hs.Vals, v)
+	}
+	return hs
+}
+
// Sort orders the collected headers by key in ascending byte order, as the
// OSS signing algorithm requires.
func (hs *headerSorter) Sort() {
	sort.Sort(hs)
}

// Len implements sort.Interface.
func (hs *headerSorter) Len() int {
	return len(hs.Vals)
}

// Less implements sort.Interface; keys are compared byte-wise.
func (hs *headerSorter) Less(i, j int) bool {
	return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
}

// Swap implements sort.Interface, keeping keys and values in step.
func (hs *headerSorter) Swap(i, j int) {
	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}

+ 618 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go

@@ -0,0 +1,618 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/xml"
+	"hash/crc64"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+)
+
+// Bucket implements the operations of object.
+type Bucket struct {
+	Client     Client // client that owns this bucket; supplies config and the HTTP connection
+	BucketName string // name of the bucket every operation applies to
+}
+
+//
+// PutObject 新建Object,如果Object已存在,覆盖原有Object。
+//
+// objectKey  上传对象的名称,使用UTF-8编码、长度必须在1-1023字节之间、不能以“/”或者“\”字符开头。
+// reader     io.Reader读取object的数据。
+// options    上传对象时可以指定对象的属性,可用选项有CacheControl、ContentDisposition、ContentEncoding、
+// Expires、ServerSideEncryption、ObjectACL、Meta,具体含义请参看
+// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
+	opts := addContentType(options, objectKey)
+
+	request := &PutObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+	}
+	resp, err := bucket.DoPutObject(request, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+//
+// PutObjectFromFile 新建Object,内容从本地文件中读取。
+//
+// objectKey 上传对象的名称。
+// filePath  本地文件,上传对象的值为该文件内容。
+// options   上传对象时可以指定对象的属性。详见PutObject的options。
+//
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	opts := addContentType(options, filePath, objectKey)
+
+	request := &PutObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    fd,
+	}
+	resp, err := bucket.DoPutObject(request, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+//
+// DoPutObject 上传文件。
+//
+// request  上传请求。
+// options  上传选项。
+//
+// Response 上传请求返回值。
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
+	isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
+	if !isOptSet {
+		options = addContentType(options, request.ObjectKey)
+	}
+
+	resp, err := bucket.do("PUT", request.ObjectKey, "", "", options, request.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoPutObject")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+//
+// GetObject 下载文件。
+//
+// objectKey 下载的文件名称。
+// options   对象的属性限制项,可选值有Range、IfModifiedSince、IfUnmodifiedSince、IfMatch、
+// IfNoneMatch、AcceptEncoding,详细请参考
+// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser  reader,读取数据后需要close。error为nil时有效。
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
+	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
+	if err != nil {
+		return nil, err
+	}
+	return result.Response.Body, nil
+}
+
+//
+// GetObjectToFile 下载文件。
+//
+// objectKey  下载的文件名称。
+// filePath   下载对象的内容写到该本地文件。
+// options    对象的属性限制项。详见GetObject的options。
+//
+// error  操作无错误时返回error为nil,非nil为错误说明。
+//
+func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
+	// 读取Object内容
+	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
+	if err != nil {
+		return err
+	}
+	defer result.Response.Body.Close()
+
+	// 如果文件不存在则创建,存在则清空
+	fd, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	// 存储数据到文件
+	_, err = io.Copy(fd, result.Response.Body)
+	if err != nil {
+		return err
+	}
+
+	// 比较CRC值
+	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+	if bucket.getConfig().IsEnableCRC && !hasRange {
+		result.Response.ClientCRC = result.ClientCRC.Sum64()
+		err = checkCRC(result.Response, "GetObjectToFile")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+//
+// DoGetObject 下载文件
+//
+// request 下载请求
+// options    对象的属性限制项。详见GetObject的options。
+//
+// GetObjectResult 下载请求返回值。
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
+	resp, err := bucket.do("GET", request.ObjectKey, "", "", options, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetObjectResult{
+		Response: resp,
+	}
+
+	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+	if bucket.getConfig().IsEnableCRC && !hasRange {
+		crcCalc := crc64.New(crcTable())
+		resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, crcCalc))
+		result.ServerCRC = resp.ServerCRC
+		result.ClientCRC = crcCalc
+	}
+
+	return result, nil
+}
+
+//
+// CopyObject 同一个bucket内拷贝Object。
+//
+// srcObjectKey  Copy的源对象。
+// destObjectKey Copy的目标对象。
+// options  Copy对象时,您可以指定源对象的限制条件,满足限制条件时copy,不满足时返回错误,您可以选择如下选项CopySourceIfMatch、
+// CopySourceIfNoneMatch、CopySourceIfModifiedSince、CopySourceIfUnmodifiedSince、MetadataDirective。
+// Copy对象时,您可以指定目标对象的属性,如CacheControl、ContentDisposition、ContentEncoding、Expires、
+// ServerSideEncryption、ObjectACL、Meta,选项的含义请参看
+// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	var out CopyObjectResult
+	options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	resp, err := bucket.do("PUT", destObjectKey, "", "", options, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// CopyObjectTo bucket间拷贝object。
+//
+// srcObjectKey   源Object名称。源Bucket名称为Bucket.BucketName。
+// destBucketName  目标Bucket名称。
+// destObjectKey  目标Object名称。
+// options        Copy选项,详见CopyObject的options。
+//
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
+	return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+//
+// CopyObjectFrom bucket间拷贝object。
+//
+// srcBucketName  源Bucket名称。
+// srcObjectKey   源Object名称。
+// destObjectKey  目标Object名称。目标Bucket名称为Bucket.BucketName。
+// options        Copy选项,详见CopyObject的options。
+//
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	destBucketName := bucket.BucketName
+	var out CopyObjectResult
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return out, err
+	}
+
+	return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	var out CopyObjectResult
+	options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return out, err
+	}
+	resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, "", "", headers, nil, 0)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// AppendObject 追加方式上传。
+//
+// AppendObject参数必须包含position,其值指定从何处进行追加。首次追加操作的position必须为0,
+// 后续追加操作的position是Object的当前长度。例如,第一次Append Object请求指定position值为0,
+// content-length是65536;那么,第二次Append Object需要指定position为65536。
+// 每次操作成功后,响应头部x-oss-next-append-position也会标明下一次追加的position。
+//
+// objectKey  需要追加的Object。
+// reader     io.Reader,读取追的内容。
+// appendPosition  object追加的起始位置。
+// destObjectProperties  第一次追加时指定新对象的属性,如CacheControl、ContentDisposition、ContentEncoding、
+// Expires、ServerSideEncryption、ObjectACL。
+//
+// int64 下次追加的开始位置,error为nil空时有效。
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
+	request := &AppendObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+		Position:  appendPosition,
+	}
+
+	result, err := bucket.DoAppendObject(request, options)
+
+	return result.NextPosition, err
+}
+
+//
+// DoAppendObject 追加上传。
+//
+// request 追加上传请求。
+// options 追加上传选项。
+//
+// AppendObjectResult 追加上传请求返回值。
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
+	params := "append&position=" + strconv.FormatInt(request.Position, 10)
+	headers := make(map[string]string)
+
+	opts := addContentType(options, request.ObjectKey)
+	handleOptions(headers, opts)
+
+	var initCRC uint64
+	isCRCSet, initCRCStr, _ := isOptionSet(options, initCRC64)
+	if isCRCSet {
+		initCRC, _ = strconv.ParseUint(initCRCStr, 10, 64)
+	}
+
+	handleOptions(headers, opts)
+	resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, params, headers, request.Reader, initCRC)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64)
+	result := &AppendObjectResult{
+		NextPosition: nextPosition,
+		CRC:          resp.ServerCRC,
+	}
+
+	if bucket.getConfig().IsEnableCRC && isCRCSet {
+		err = checkCRC(resp, "AppendObject")
+		if err != nil {
+			return result, err
+		}
+	}
+
+	return result, nil
+}
+
+//
+// DeleteObject 删除Object。
+//
+// objectKey 待删除Object。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) DeleteObject(objectKey string) error {
+	resp, err := bucket.do("DELETE", objectKey, "", "", nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// DeleteObjects 批量删除object。
+//
+// objectKeys 待删除object类表。
+// options 删除选项,DeleteObjectsQuiet,是否是安静模式,默认不使用。
+//
+// DeleteObjectsResult 非安静模式的的返回值。
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
+	out := DeleteObjectsResult{}
+	dxml := deleteXML{}
+	for _, key := range objectKeys {
+		dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
+	}
+	isQuietStr, _ := findOption(options, deleteObjectsQuiet, "FALSE")
+	isQuiet, _ := strconv.ParseBool(isQuietStr)
+	dxml.Quiet = isQuiet
+	encode := "&encoding-type=url"
+
+	bs, err := xml.Marshal(dxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+	sum := md5.Sum(bs)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	options = append(options, ContentMD5(b64))
+	resp, err := bucket.do("POST", "", "delete"+encode, "delete", options, buffer)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	if !dxml.Quiet {
+		if err = xmlUnmarshal(resp.Body, &out); err == nil {
+			err = decodeDeleteObjectsResult(&out)
+		}
+	}
+	return out, err
+}
+
+//
+// IsObjectExist object是否存在。
+//
+// bool  object是否存在,true存在,false不存在。error为nil时有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
+	listRes, err := bucket.ListObjects(Prefix(objectKey), MaxKeys(1))
+	if err != nil {
+		return false, err
+	}
+
+	if len(listRes.Objects) == 1 && listRes.Objects[0].Key == objectKey {
+		return true, nil
+	}
+	return false, nil
+}
+
+//
+// ListObjects 获得Bucket下筛选后所有的object的列表。
+//
+// options  ListObject的筛选行为。Prefix指定的前缀、MaxKeys最大数目、Marker第一个开始、Delimiter对Object名字进行分组的字符。
+//
+// 您有如下8个object,my-object-1, my-object-11, my-object-2, my-object-21,
+// my-object-22, my-object-3, my-object-31, my-object-32。如果您指定了Prefix为my-object-2,
+// 则返回my-object-2, my-object-21, my-object-22三个object。如果您指定了Marker为my-object-22,
+// 则返回my-object-3, my-object-31, my-object-32三个object。如果您指定MaxKeys则每次最多返回MaxKeys个,
+// 最后一次可能不足。这三个参数可以组合使用,实现分页等功能。如果把prefix设为某个文件夹名,就可以罗列以此prefix开头的文件,
+// 即该文件夹下递归的所有的文件和子文件夹。如果再把delimiter设置为"/"时,返回值就只罗列该文件夹下的文件,该文件夹下的子文件名
+// 返回在CommonPrefixes部分,子文件夹下递归的文件和文件夹不被显示。例如一个bucket存在三个object,fun/test.jpg、
+// fun/movie/001.avi、fun/movie/007.avi。若设定prefix为"fun/",则返回三个object;如果增加设定
+// delimiter为"/",则返回文件"fun/test.jpg"和前缀"fun/movie/",即实现了文件夹的逻辑。
+//
+// 常用场景,请参数示例sample/list_object.go。
+//
+// ListObjectsResponse  操作成功后的返回值,成员Objects为bucket中对象列表。error为nil时该返回值有效。
+//
+func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
+	var out ListObjectsResult
+
+	options = append(options, EncodingType("url"))
+	params, err := handleParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", params, "", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectsResult(&out)
+	return out, err
+}
+
+//
+// SetObjectMeta 设置Object的Meta。
+//
+// objectKey object
+// options 指定对象的属性,有以下可选项CacheControl、ContentDisposition、ContentEncoding、Expires、
+// ServerSideEncryption、Meta。
+//
+// error 操作无错误时error为nil,非nil为错误信息。
+//
+func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
+	options = append(options, MetadataDirective(MetaReplace))
+	_, err := bucket.CopyObject(objectKey, objectKey, options...)
+	return err
+}
+
+//
+// GetObjectDetailedMeta 查询Object的头信息。
+//
+// objectKey object名称。
+// objectPropertyConstraints 对象的属性限制项,满足时正常返回,不满足时返回错误。现在项有IfModifiedSince、IfUnmodifiedSince、
+// IfMatch、IfNoneMatch。具体含义请参看 https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
+//
+// http.Header  对象的meta,error为nil时有效。
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
+	resp, err := bucket.do("HEAD", objectKey, "", "", options, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+//
+// GetObjectMeta 查询Object的头信息。
+//
+// GetObjectMeta相比GetObjectDetailedMeta更轻量,仅返回指定Object的少量基本meta信息,
+// 包括该Object的ETag、Size(对象大小)、LastModified,其中Size由响应头Content-Length的数值表示。
+//
+// objectKey object名称。
+//
+// http.Header 对象的meta,error为nil时有效。
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
+	resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+//
+// SetObjectACL 修改Object的ACL权限。
+//
+// 只有Bucket Owner才有权限调用PutObjectACL来修改Object的ACL。Object ACL优先级高于Bucket ACL。
+// 例如Bucket ACL是private的,而Object ACL是public-read-write的,则访问这个Object时,
+// 先判断Object的ACL,所以所有用户都拥有这个Object的访问权限,即使这个Bucket是private bucket。
+// 如果某个Object从来没设置过ACL,则访问权限遵循Bucket ACL。
+//
+// Object的读操作包括GetObject,HeadObject,CopyObject和UploadPartCopy中的对source object的读;
+// Object的写操作包括:PutObject,PostObject,AppendObject,DeleteObject,
+// DeleteMultipleObjects,CompleteMultipartUpload以及CopyObject对新的Object的写。
+//
+// objectKey 设置权限的object。
+// objectAcl 对象权限。可选值PrivateACL(私有读写)、PublicReadACL(公共读私有写)、PublicReadWriteACL(公共读写)。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
+	options := []Option{ObjectACL(objectACL)}
+	resp, err := bucket.do("PUT", objectKey, "acl", "acl", options, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetObjectACL 获取对象的ACL权限。
+//
+// objectKey 获取权限的object。
+//
+// GetObjectAclResponse 获取权限操作返回值,error为nil时有效。GetObjectAclResponse.Acl为对象的权限。
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
+	var out GetObjectACLResult
+	resp, err := bucket.do("GET", objectKey, "acl", "acl", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// Private
+func (bucket Bucket) do(method, objectName, urlParams, subResource string,
+	options []Option, data io.Reader) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+	return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+		urlParams, subResource, headers, data, 0)
+}
+
+// getConfig returns the configuration of the Client that owns this bucket.
+func (bucket Bucket) getConfig() *Config {
+	return bucket.Client.Config
+}
+
+func addContentType(options []Option, keys ...string) []Option {
+	typ := TypeByExtension("")
+	for _, key := range keys {
+		typ = TypeByExtension(key)
+		if typ != "" {
+			break
+		}
+	}
+
+	if typ == "" {
+		typ = "application/octet-stream"
+	}
+
+	opts := []Option{ContentType(typ)}
+	opts = append(opts, options...)
+
+	return opts
+}

Разница между файлами не показана из-за своего большого размера
+ 1687 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket_test.go


+ 739 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go

@@ -0,0 +1,739 @@
+// Package oss implements functions for access oss service.
+// It has two main struct Client and Bucket.
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+)
+
+//
+// Client is the SDK entry point. Its methods implement bucket-level
+// operations: create/delete bucket, set/get acl/lifecycle/referer/
+// logging/website, and so on. Object upload/download is done through
+// Bucket. Create a Client with oss.New.
+//
+type (
+	// Client oss client
+	Client struct {
+		Config *Config // Oss Client configure
+		Conn   *Conn   // Send http request
+	}
+
+	// ClientOption client option such as UseCname, Timeout, SecurityToken.
+	ClientOption func(*Client)
+)
+
+//
+// New 生成一个新的Client。
+//
+// endpoint        用户Bucket所在数据中心的访问域名,如http://oss-cn-hangzhou.aliyuncs.com。
+// accessKeyId     用户标识。
+// accessKeySecret 用户密钥。
+//
+// Client 生成的新Client。error为nil时有效。
+// error  操作无错误时为nil,非nil时表示操作出错。
+//
+func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
+	config := getDefaultOssConfig()
+	config.Endpoint = endpoint
+	config.AccessKeyID = accessKeyID
+	config.AccessKeySecret = accessKeySecret
+
+	url := &urlMaker{}
+	url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
+	conn := &Conn{config, url}
+
+	client := &Client{
+		config,
+		conn,
+	}
+
+	for _, option := range options {
+		option(client)
+	}
+
+	return client, nil
+}
+
+//
+// Bucket 取存储空间(Bucket)的对象实例。
+//
+// bucketName 存储空间名称。
+// Bucket     新的Bucket。error为nil时有效。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) Bucket(bucketName string) (*Bucket, error) {
+	return &Bucket{
+		client,
+		bucketName,
+	}, nil
+}
+
+//
+// CreateBucket 创建Bucket。
+//
+// bucketName bucket名称,在整个OSS中具有全局唯一性,且不能修改。bucket名称的只能包括小写字母,数字和短横线-,
+// 必须以小写字母或者数字开头,长度必须在3-255字节之间。
+// options  创建bucket的选项。您可以使用选项ACL,指定bucket的访问权限。Bucket有以下三种访问权限,私有读写(ACLPrivate)、
+// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite),默认访问权限是私有读写。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) CreateBucket(bucketName string, options ...Option) error {
+	headers := make(map[string]string)
+	handleOptions(headers, options)
+
+	resp, err := client.do("PUT", bucketName, "", "", headers, nil)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// ListBuckets 获取当前用户下的bucket。
+//
+// options 指定ListBuckets的筛选行为,Prefix、Marker、MaxKeys三个选项。Prefix限定前缀。
+// Marker设定从Marker之后的第一个开始返回。MaxKeys限定此次返回的最大数目,默认为100。
+// 常用使用场景的实现,参数示例程序list_bucket.go。
+// ListBucketsResponse 操作成功后的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
+	var out ListBucketsResult
+
+	params, err := handleParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := client.do("GET", "", params, "", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// IsBucketExist Bucket是否存在。
+//
+// bucketName 存储空间名称。
+//
+// bool  存储空间是否存在。error为nil时有效。
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) IsBucketExist(bucketName string) (bool, error) {
+	listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
+	if err != nil {
+		return false, err
+	}
+
+	if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName {
+		return true, nil
+	}
+	return false, nil
+}
+
+//
+// DeleteBucket 删除空存储空间。非空时请先清理Object、Upload。
+//
+// bucketName 存储空间名称。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) DeleteBucket(bucketName string) error {
+	resp, err := client.do("DELETE", bucketName, "", "", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLocation 查看Bucket所属数据中心位置的信息。
+//
+// 如果您想了解"访问域名和数据中心"详细信息,请参看
+// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
+//
+// bucketName 存储空间名称。
+//
+// string Bucket所属的数据中心位置信息。
+// error  操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) GetBucketLocation(bucketName string) (string, error) {
+	resp, err := client.do("GET", bucketName, "location", "location", nil, nil)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var LocationConstraint string
+	err = xmlUnmarshal(resp.Body, &LocationConstraint)
+	return LocationConstraint, err
+}
+
+//
+// SetBucketACL 修改Bucket的访问权限。
+//
+// bucketName 存储空间名称。
+// bucketAcl  bucket的访问权限。Bucket有以下三种访问权限,Bucket有以下三种访问权限,私有读写(ACLPrivate)、
+// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite)。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
+	headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
+	resp, err := client.do("PUT", bucketName, "", "", headers, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetBucketACL 获得Bucket的访问权限。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketAclResponse 操作成功后的返回值,error为nil时该返回值有效。
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
+	var out GetBucketACLResult
+	resp, err := client.do("GET", bucketName, "acl", "acl", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// SetBucketLifecycle 修改Bucket的生命周期设置。
+//
+// OSS提供Object生命周期管理来为用户管理对象。用户可以为某个Bucket定义生命周期配置,来为该Bucket的Object定义各种规则。
+// Bucket的拥有者可以通过SetBucketLifecycle来设置Bucket的Lifecycle配置。Lifecycle开启后,OSS将按照配置,
+// 定期自动删除与Lifecycle规则相匹配的Object。如果您想了解更多的生命周期的信息,请参看
+// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
+//
+// bucketName 存储空间名称。
+// rules 生命周期规则列表。生命周期规则有两种格式,指定绝对和相对过期时间,分布由days和year/month/day控制。
+// 具体用法请参考示例程序sample/bucket_lifecycle.go。
+//
+// error 操作无错误时返回error为nil,非nil为错误信息。
+//
+func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
+	lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
+	bs, err := xml.Marshal(lxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, "lifecycle", "lifecycle", headers, buffer)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketLifecycle 删除Bucket的生命周期设置。
+//
+//
+// bucketName 存储空间名称。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketLifecycle(bucketName string) error {
+	resp, err := client.do("DELETE", bucketName, "lifecycle", "lifecycle", nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLifecycle 查看Bucket的生命周期设置。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketLifecycleResponse 操作成功的返回值,error为nil时该返回值有效。Rules为该bucket上的规则列表。
+// error 操作无错误时为nil,非nil为错误信息。
+//
+func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
+	var out GetBucketLifecycleResult
+	resp, err := client.do("GET", bucketName, "lifecycle", "lifecycle", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// SetBucketReferer 设置bucket的referer访问白名单和是否允许referer字段为空的请求访问。
+//
+// 防止用户在OSS上的数据被其他人盗用,OSS支持基于HTTP header中表头字段referer的防盗链方法。可以通过OSS控制台或者API的方式对
+// 一个bucket设置referer字段的白名单和是否允许referer字段为空的请求访问。例如,对于一个名为oss-example的bucket,
+// 设置其referer白名单为http://www.aliyun.com。则所有referer为http://www.aliyun.com的请求才能访问oss-example
+// 这个bucket中的object。如果您还需要了解更多信息,请参看
+// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
+//
+// bucketName  存储空间名称。
+// referers  访问白名单列表。一个bucket可以支持多个referer参数。referer参数支持通配符"*"和"?"。
+// 用法请参看示例sample/bucket_referer.go
+// allowEmptyReferer  指定是否允许referer字段为空的请求访问。 默认为true。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
+	rxml := RefererXML{}
+	rxml.AllowEmptyReferer = allowEmptyReferer
+	if referers == nil {
+		rxml.RefererList = append(rxml.RefererList, "")
+	} else {
+		for _, referer := range referers {
+			rxml.RefererList = append(rxml.RefererList, referer)
+		}
+	}
+
+	bs, err := xml.Marshal(rxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, "referer", "referer", headers, buffer)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetBucketReferer 获得Bucket的白名单地址。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketRefererResponse 操作成功的返回值,error为nil时该返回值有效。
+// error 操作无错误时为nil,非nil为错误信息。
+//
+func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
+	var out GetBucketRefererResult
+	resp, err := client.do("GET", bucketName, "referer", "referer", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// SetBucketLogging 修改Bucket的日志设置。
+//
+// OSS为您提供自动保存访问日志记录功能。Bucket的拥有者可以开启访问日志记录功能。当一个bucket开启访问日志记录功能后,
+// OSS自动将访问这个bucket的请求日志,以小时为单位,按照固定的命名规则,生成一个Object写入用户指定的bucket中。
+// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+//
+// bucketName   需要记录访问日志的Bucket。
+// targetBucket 访问日志记录到的Bucket。
+// targetPrefix bucketName中需要存储访问日志记录的object前缀。为空记录所有object的访问日志。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
+	isEnable bool) error {
+	var err error
+	var bs []byte
+	if isEnable {
+		lxml := LoggingXML{}
+		lxml.LoggingEnabled.TargetBucket = targetBucket
+		lxml.LoggingEnabled.TargetPrefix = targetPrefix
+		bs, err = xml.Marshal(lxml)
+	} else {
+		lxml := loggingXMLEmpty{}
+		bs, err = xml.Marshal(lxml)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, "logging", "logging", headers, buffer)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketLogging 删除Bucket的日志设置。
+//
+// bucketName 需要删除访问日志的Bucket。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketLogging(bucketName string) error {
+	resp, err := client.do("DELETE", bucketName, "logging", "logging", nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLogging 获得Bucket的日志设置。
+//
+// bucketName  需要删除访问日志的Bucket。
+// GetBucketLoggingResponse  操作成功的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
+	var out GetBucketLoggingResult
+	resp, err := client.do("GET", bucketName, "logging", "logging", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// SetBucketWebsite 设置/修改Bucket的默认首页以及错误页。
+//
+// OSS支持静态网站托管,Website操作可以将一个bucket设置成静态网站托管模式 。您可以将自己的Bucket配置成静态网站托管模式。
+// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName     需要设置Website的Bucket。
+// indexDocument  索引文档。
+// errorDocument  错误文档。
+//
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
+	wxml := WebsiteXML{}
+	wxml.IndexDocument.Suffix = indexDocument
+	wxml.ErrorDocument.Key = errorDocument
+
+	bs, err := xml.Marshal(wxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, "website", "website", headers, buffer)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketWebsite 删除Bucket的Website设置。
+//
+// bucketName  需要删除website设置的Bucket。
+//
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketWebsite(bucketName string) error {
+	resp, err := client.do("DELETE", bucketName, "website", "website", nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketWebsite 获得Bucket的默认首页以及错误页。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketWebsiteResponse 操作成功的返回值,error为nil时该返回值有效。
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
+	var out GetBucketWebsiteResult
+	resp, err := client.do("GET", bucketName, "website", "website", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// SetBucketCORS 设置Bucket的跨域访问(CORS)规则。
+//
+// 跨域访问的更多信息,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+//
+// bucketName 需要设置Website的Bucket。
+// corsRules  待设置的CORS规则。用法请参看示例代码sample/bucket_cors.go。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
+	corsxml := CORSXML{}
+	for _, v := range corsRules {
+		cr := CORSRule{}
+		cr.AllowedMethod = v.AllowedMethod
+		cr.AllowedOrigin = v.AllowedOrigin
+		cr.AllowedHeader = v.AllowedHeader
+		cr.ExposeHeader = v.ExposeHeader
+		cr.MaxAgeSeconds = v.MaxAgeSeconds
+		corsxml.CORSRules = append(corsxml.CORSRules, cr)
+	}
+
+	bs, err := xml.Marshal(corsxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, "cors", "cors", headers, buffer)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketCORS deletes the bucket's CORS (cross-origin) configuration.
+// (The original comment said "Website" — a copy/paste slip; this call
+// removes the "cors" sub-resource.)
+//
+// bucketName  the bucket whose CORS rules are removed.
+//
+// error  nil on success, otherwise the error description.
+//
+func (client Client) DeleteBucketCORS(bucketName string) error {
+	resp, err := client.do("DELETE", bucketName, "cors", "cors", nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	// OSS answers 204 No Content on a successful delete.
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketCORS 获得Bucket的CORS设置。
+//
+//
+// bucketName  存储空间名称。
+// GetBucketCORSResult  操作成功的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
+	var out GetBucketCORSResult
+	resp, err := client.do("GET", bucketName, "cors", "cors", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// GetBucketInfo 获得Bucket的信息。
+//
+// bucketName  存储空间名称。
+// GetBucketInfoResult  操作成功的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
+	var out GetBucketInfoResult
+	resp, err := client.do("GET", bucketName, "bucketInfo", "bucketInfo", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// UseCname 设置是否使用CNAME,默认不使用。
+//
+// isUseCname true设置endpoint格式是cname格式,false为非cname格式,默认false
+//
+func UseCname(isUseCname bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsCname = isUseCname
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+//
+// Timeout 设置HTTP超时时间。
+//
+// connectTimeoutSec HTTP链接超时时间,单位是秒,默认10秒。0表示永不超时。
+// readWriteTimeout  HTTP发送接受数据超时时间,单位是秒,默认20秒。0表示永不超时。
+//
+func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
+	return func(client *Client) {
+		client.Config.HTTPTimeout.ConnectTimeout =
+			time.Second * time.Duration(connectTimeoutSec)
+		client.Config.HTTPTimeout.ReadWriteTimeout =
+			time.Second * time.Duration(readWriteTimeout)
+		client.Config.HTTPTimeout.HeaderTimeout =
+			time.Second * time.Duration(readWriteTimeout)
+		client.Config.HTTPTimeout.LongTimeout =
+			time.Second * time.Duration(readWriteTimeout*10)
+	}
+}
+
+//
+// SecurityToken 临时用户设置SecurityToken。
+//
+// token STS token
+//
+func SecurityToken(token string) ClientOption {
+	return func(client *Client) {
+		client.Config.SecurityToken = strings.TrimSpace(token)
+	}
+}
+
+//
+// EnableMD5 是否启用MD5校验,默认启用。
+//
+// isEnableMD5 true启用MD5校验,false不启用MD5校验
+//
+func EnableMD5(isEnableMD5 bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsEnableMD5 = isEnableMD5
+	}
+}
+
+//
+// MD5ThresholdCalcInMemory 使用内存计算MD5值的上限,默认16MB。
+//
+// threshold 单位Byte。上传内容小于threshold在MD5在内存中计算,大于使用临时文件计算MD5
+//
+func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
+	return func(client *Client) {
+		client.Config.MD5Threshold = threshold
+	}
+}
+
+//
+// EnableCRC 上传是否启用CRC校验,默认启用。
+//
+// isEnableCRC true启用CRC校验,false不启用CRC校验
+//
+func EnableCRC(isEnableCRC bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsEnableCRC = isEnableCRC
+	}
+}
+
+//
+// UserAgent 指定UserAgent,默认如下aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2)。
+//
+// userAgent user agent字符串。
+//
+func UserAgent(userAgent string) ClientOption {
+	return func(client *Client) {
+		client.Config.UserAgent = userAgent
+	}
+}
+
+//
+// Proxy 设置代理服务器,默认不使用代理。
+//
+// proxyHost 代理服务器地址,格式是host或host:port
+//
+func Proxy(proxyHost string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+//
+// AuthProxy 设置需要认证的代理服务器,默认不使用代理。
+//
+// proxyHost 代理服务器地址,格式是host或host:port
+// proxyUser 代理服务器认证的用户名
+// proxyPassword 代理服务器认证的用户密码
+//
+func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Config.IsAuthProxy = true
+		client.Config.ProxyUser = proxyUser
+		client.Config.ProxyPassword = proxyPassword
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+// do is a private bucket-level convenience wrapper around Conn.Do:
+// it passes an empty object name and a zero initial CRC seed.
+func (client Client) do(method, bucketName, urlParams, subResource string,
+	headers map[string]string, data io.Reader) (*Response, error) {
+	return client.Conn.Do(method, bucketName, "", urlParams,
+		subResource, headers, data, 0)
+}

Разница между файлами не показана из-за своего большого размера
+ 1377 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/client_test.go


+ 67 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go

@@ -0,0 +1,67 @@
+package oss
+
+import (
+	"time"
+)
+
+// HTTPTimeout bundles the timeout knobs applied to the HTTP transport.
+type HTTPTimeout struct {
+	ConnectTimeout   time.Duration // dial timeout
+	ReadWriteTimeout time.Duration // per read/write operation deadline
+	HeaderTimeout    time.Duration // response-header timeout
+	LongTimeout      time.Duration // relaxed idle deadline between operations
+}
+
+// Config holds the client-side settings for talking to OSS.
+type Config struct {
+	Endpoint        string      // OSS service address
+	AccessKeyID     string      // access key ID
+	AccessKeySecret string      // access key secret
+	RetryTimes      uint        // retry count on failure (default 5)
+	UserAgent       string      // SDK name/version/system info
+	IsDebug         bool        // debug mode switch (default false)
+	Timeout         uint        // timeout in seconds (default 60)
+	SecurityToken   string      // STS token
+	IsCname         bool        // whether Endpoint is a CNAME
+	HTTPTimeout     HTTPTimeout // HTTP timeout settings
+	IsUseProxy      bool        // whether to use a proxy server
+	ProxyHost       string      // proxy server address
+	IsAuthProxy     bool        // whether the proxy requires authentication
+	ProxyUser       string      // proxy authentication user name
+	ProxyPassword   string      // proxy authentication password
+	IsEnableMD5     bool        // verify uploads with MD5
+	MD5Threshold    int64       // max bytes for in-memory MD5; larger spools to a temp file
+	IsEnableCRC     bool        // verify uploads with CRC64
+}
+
+// 获取默认配置
+func getDefaultOssConfig() *Config {
+	config := Config{}
+
+	config.Endpoint = ""
+	config.AccessKeyID = ""
+	config.AccessKeySecret = ""
+	config.RetryTimes = 5
+	config.IsDebug = false
+	config.UserAgent = userAgent
+	config.Timeout = 60 // seconds
+	config.SecurityToken = ""
+	config.IsCname = false
+
+	config.HTTPTimeout.ConnectTimeout = time.Second * 30   // 30s
+	config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
+	config.HTTPTimeout.HeaderTimeout = time.Second * 60    // 60s
+	config.HTTPTimeout.LongTimeout = time.Second * 300     // 300s
+
+	config.IsUseProxy = false
+	config.ProxyHost = ""
+	config.IsAuthProxy = false
+	config.ProxyUser = ""
+	config.ProxyPassword = ""
+
+	config.MD5Threshold = 16 * 1024 * 1024 // 16MB
+	config.IsEnableMD5 = false
+	config.IsEnableCRC = true
+
+	return &config
+}

+ 420 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go

@@ -0,0 +1,420 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Conn is the transport-level handle: the client configuration plus the
+// URL/resource builder derived from the endpoint.
+type Conn struct {
+	config *Config
+	url    *urlMaker
+}
+
+// Do builds the request URL and the canonicalized resource for the given
+// bucket/object, then executes the signed HTTP request and returns the
+// wrapped response.
+func (conn Conn) Do(method, bucketName, objectName, urlParams, subResource string,
+	headers map[string]string, data io.Reader, initCRC uint64) (*Response, error) {
+	uri := conn.url.getURL(bucketName, objectName, urlParams)
+	resource := conn.url.getResource(bucketName, objectName, subResource)
+	return conn.doRequest(method, uri, resource, headers, data, initCRC)
+}
+
+func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string,
+	headers map[string]string, data io.Reader, initCRC uint64) (*Response, error) {
+	httpTimeOut := conn.config.HTTPTimeout
+	method = strings.ToUpper(method)
+	if !conn.config.IsUseProxy {
+		uri.Opaque = uri.Path
+	}
+	req := &http.Request{
+		Method:     method,
+		URL:        uri,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       uri.Host,
+	}
+
+	fd, crc := conn.handleBody(req, data, initCRC)
+	if fd != nil {
+		defer func() {
+			fd.Close()
+			os.Remove(fd.Name())
+		}()
+	}
+
+	date := time.Now().UTC().Format(http.TimeFormat)
+	req.Header.Set(HTTPHeaderDate, date)
+	req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
+	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
+	if conn.config.SecurityToken != "" {
+		req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
+	}
+
+	if headers != nil {
+		for k, v := range headers {
+			req.Header.Set(k, v)
+		}
+	}
+
+	conn.signHeader(req, canonicalizedResource)
+
+	var transport *http.Transport
+	if conn.config.IsUseProxy {
+		// proxy
+		proxyURL, err := url.Parse(conn.config.ProxyHost)
+		if err != nil {
+			return nil, err
+		}
+
+		transport = &http.Transport{
+			Proxy: http.ProxyURL(proxyURL),
+			Dial: func(netw, addr string) (net.Conn, error) {
+				conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+				if err != nil {
+					return nil, err
+				}
+				return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
+			},
+			ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
+			MaxIdleConnsPerHost:   2000,
+		}
+
+		if conn.config.IsAuthProxy {
+			auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
+			basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+			req.Header.Set("Proxy-Authorization", basic)
+		}
+	} else {
+		// no proxy
+		transport = &http.Transport{
+			Dial: func(netw, addr string) (net.Conn, error) {
+				conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+				if err != nil {
+					return nil, err
+				}
+				return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
+			},
+			ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
+			MaxIdleConnsPerHost:   2000,
+		}
+	}
+
+	timeoutClient := &http.Client{Transport: transport}
+
+	resp, err := timeoutClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return conn.handleResponse(resp, crc)
+}
+
+// handle request body
+func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64) (*os.File, hash.Hash64) {
+	var file *os.File
+	var crc hash.Hash64
+	reader := body
+
+	// length
+	switch v := body.(type) {
+	case *bytes.Buffer:
+		req.ContentLength = int64(v.Len())
+	case *bytes.Reader:
+		req.ContentLength = int64(v.Len())
+	case *strings.Reader:
+		req.ContentLength = int64(v.Len())
+	case *os.File:
+		req.ContentLength = tryGetFileSize(v)
+	}
+	req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
+
+	// md5
+	if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
+		if req.ContentLength == 0 || req.ContentLength > conn.config.MD5Threshold {
+			// huge body, use temporary file
+			file, _ = ioutil.TempFile(os.TempDir(), TempFilePrefix)
+			if file != nil {
+				io.Copy(file, body)
+				file.Seek(0, os.SEEK_SET)
+				md5 := md5.New()
+				io.Copy(md5, file)
+				sum := md5.Sum(nil)
+				b64 := base64.StdEncoding.EncodeToString(sum[:])
+				req.Header.Set(HTTPHeaderContentMD5, b64)
+				file.Seek(0, os.SEEK_SET)
+				reader = file
+			}
+		} else {
+			// small body, use memory
+			buf, _ := ioutil.ReadAll(body)
+			sum := md5.Sum(buf)
+			b64 := base64.StdEncoding.EncodeToString(sum[:])
+			req.Header.Set(HTTPHeaderContentMD5, b64)
+			reader = bytes.NewReader(buf)
+		}
+	}
+
+	if reader != nil && conn.config.IsEnableCRC {
+		crc = NewCRC(crcTable(), initCRC)
+		reader = io.TeeReader(reader, crc)
+	}
+
+	rc, ok := reader.(io.ReadCloser)
+	if !ok && reader != nil {
+		rc = ioutil.NopCloser(reader)
+	}
+	req.Body = rc
+
+	return file, crc
+}
+
+func tryGetFileSize(f *os.File) int64 {
+	fInfo, _ := f.Stat()
+	return fInfo.Size()
+}
+
+// handle response
+func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
+	var cliCRC uint64
+	var srvCRC uint64
+
+	statusCode := resp.StatusCode
+	if statusCode >= 400 && statusCode <= 505 {
+		// 4xx and 5xx indicate that the operation has error occurred
+		var respBody []byte
+		respBody, err := readResponseBody(resp)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(respBody) == 0 {
+			// no error in response body
+			err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
+		} else {
+			// response contains storage service error object, unmarshal
+			srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+				resp.Header.Get(HTTPHeaderOssRequestID))
+			if err != nil { // error unmarshaling the error response
+				err = errIn
+			}
+			err = srvErr
+		}
+		return &Response{
+			StatusCode: resp.StatusCode,
+			Headers:    resp.Header,
+			Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+		}, err
+	} else if statusCode >= 300 && statusCode <= 307 {
+		// oss use 3xx, but response has no body
+		err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
+		return &Response{
+			StatusCode: resp.StatusCode,
+			Headers:    resp.Header,
+			Body:       resp.Body,
+		}, err
+	}
+
+	if conn.config.IsEnableCRC && crc != nil {
+		cliCRC = crc.Sum64()
+	}
+	srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
+
+	// 2xx, successful
+	return &Response{
+		StatusCode: resp.StatusCode,
+		Headers:    resp.Header,
+		Body:       resp.Body,
+		ClientCRC:  cliCRC,
+		ServerCRC:  srvCRC,
+	}, nil
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+	defer resp.Body.Close()
+	out, err := ioutil.ReadAll(resp.Body)
+	if err == io.EOF {
+		err = nil
+	}
+	return out, err
+}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
+	var storageErr ServiceError
+	if err := xml.Unmarshal(body, &storageErr); err != nil {
+		return storageErr, err
+	}
+	storageErr.StatusCode = statusCode
+	storageErr.RequestID = requestID
+	storageErr.RawMessage = string(body)
+	return storageErr, nil
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		return err
+	}
+	return xml.Unmarshal(data, v)
+}
+
+// Handle http timeout
+type timeoutConn struct {
+	conn        net.Conn
+	timeout     time.Duration
+	longTimeout time.Duration
+}
+
+func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
+	conn.SetReadDeadline(time.Now().Add(longTimeout))
+	return &timeoutConn{
+		conn:        conn,
+		timeout:     timeout,
+		longTimeout: longTimeout,
+	}
+}
+
+func (c *timeoutConn) Read(b []byte) (n int, err error) {
+	c.SetReadDeadline(time.Now().Add(c.timeout))
+	n, err = c.conn.Read(b)
+	c.SetReadDeadline(time.Now().Add(c.longTimeout))
+	return n, err
+}
+
+func (c *timeoutConn) Write(b []byte) (n int, err error) {
+	c.SetWriteDeadline(time.Now().Add(c.timeout))
+	n, err = c.conn.Write(b)
+	c.SetReadDeadline(time.Now().Add(c.longTimeout))
+	return n, err
+}
+
+func (c *timeoutConn) Close() error {
+	return c.conn.Close()
+}
+
+func (c *timeoutConn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+func (c *timeoutConn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+func (c *timeoutConn) SetDeadline(t time.Time) error {
+	return c.conn.SetDeadline(t)
+}
+
+func (c *timeoutConn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
+	return c.conn.SetWriteDeadline(t)
+}
+
// urlMaker — endpoint classification for URL and resource building.
const (
	urlTypeCname  = 1
	urlTypeIP     = 2
	urlTypeAliyun = 3
)

// urlMaker builds request URLs and canonicalized resource strings from a
// parsed endpoint.
type urlMaker struct {
	Scheme  string // "http" or "https"
	NetLoc  string // host name or IP, optionally with port
	Type    int    // urlTypeCname / urlTypeIP / urlTypeAliyun
	IsProxy bool   // whether requests go through a proxy
}

// Init parses endpoint into scheme + network location and classifies it as
// CNAME, raw IP, or Aliyun virtual-host style.
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
	switch {
	case strings.HasPrefix(endpoint, "http://"):
		um.Scheme = "http"
		um.NetLoc = strings.TrimPrefix(endpoint, "http://")
	case strings.HasPrefix(endpoint, "https://"):
		um.Scheme = "https"
		um.NetLoc = strings.TrimPrefix(endpoint, "https://")
	default:
		um.Scheme = "http"
		um.NetLoc = endpoint
	}

	// Strip a port, if any, before testing whether the host is an IP.
	host, _, err := net.SplitHostPort(um.NetLoc)
	if err != nil {
		host = um.NetLoc
	}
	switch {
	case net.ParseIP(host) != nil:
		um.Type = urlTypeIP
	case isCname:
		um.Type = urlTypeCname
	default:
		um.Type = urlTypeAliyun
	}
	um.IsProxy = isProxy
}

// getURL builds the request URL for bucket/object with the given query
// string. Without a proxy, the object key is percent-escaped here.
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
	if !um.IsProxy {
		object = url.QueryEscape(object)
	}

	host := um.NetLoc
	path := "/" + object
	switch {
	case um.Type == urlTypeCname:
		// CNAME endpoints already address the bucket; host/path stand.
	case bucket == "":
		// Service-level request (no bucket): root path.
		path = "/"
	case um.Type == urlTypeIP:
		// IP endpoints put the bucket on the path.
		path = fmt.Sprintf("/%s/%s", bucket, object)
	default:
		// Aliyun style: bucket becomes a virtual-host prefix.
		host = bucket + "." + um.NetLoc
	}

	return &url.URL{
		Scheme:   um.Scheme,
		Host:     host,
		Path:     path,
		RawQuery: params,
	}
}

// getResource returns the canonicalized resource used for signing.
// Without a bucket the object name is deliberately ignored.
func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
	if subResource != "" {
		subResource = "?" + subResource
	}
	if bucketName == "" {
		return "/" + subResource
	}
	return "/" + bucketName + "/" + objectName + subResource
}

+ 124 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/conn_test.go

@@ -0,0 +1,124 @@
+package oss
+
+import (
+	"net/http"
+
+	. "gopkg.in/check.v1"
+)
+
+// OssConnSuite groups the connection-level unit tests.
+type OssConnSuite struct{}
+
+var _ = Suite(&OssConnSuite{})
+
+// TestURLMarker exercises urlMaker.Init/getURL/getResource across CNAME,
+// Aliyun virtual-host and raw-IP endpoints, with and without a proxy.
+func (s *OssConnSuite) TestURLMarker(c *C) {
+	um := urlMaker{}
+	um.Init("docs.github.com", true, false)
+	c.Assert(um.Type, Equals, urlTypeCname)
+	c.Assert(um.Scheme, Equals, "http")
+	c.Assert(um.NetLoc, Equals, "docs.github.com")
+
+	c.Assert(um.getURL("bucket", "object", "params").String(), Equals, "http://docs.github.com/object?params")
+	c.Assert(um.getURL("bucket", "object", "").String(), Equals, "http://docs.github.com/object")
+	c.Assert(um.getURL("", "object", "").String(), Equals, "http://docs.github.com/object")
+	c.Assert(um.getResource("bucket", "object", "subres"), Equals, "/bucket/object?subres")
+	c.Assert(um.getResource("bucket", "object", ""), Equals, "/bucket/object")
+	c.Assert(um.getResource("", "object", ""), Equals, "/")
+
+	um.Init("https://docs.github.com", true, false)
+	c.Assert(um.Type, Equals, urlTypeCname)
+	c.Assert(um.Scheme, Equals, "https")
+	c.Assert(um.NetLoc, Equals, "docs.github.com")
+
+	um.Init("http://docs.github.com", true, false)
+	c.Assert(um.Type, Equals, urlTypeCname)
+	c.Assert(um.Scheme, Equals, "http")
+	c.Assert(um.NetLoc, Equals, "docs.github.com")
+
+	um.Init("docs.github.com:8080", false, true)
+	c.Assert(um.Type, Equals, urlTypeAliyun)
+	c.Assert(um.Scheme, Equals, "http")
+	c.Assert(um.NetLoc, Equals, "docs.github.com:8080")
+
+	c.Assert(um.getURL("bucket", "object", "params").String(), Equals, "http://bucket.docs.github.com:8080/object?params")
+	c.Assert(um.getURL("bucket", "object", "").String(), Equals, "http://bucket.docs.github.com:8080/object")
+	c.Assert(um.getURL("", "object", "").String(), Equals, "http://docs.github.com:8080/")
+	c.Assert(um.getResource("bucket", "object", "subres"), Equals, "/bucket/object?subres")
+	c.Assert(um.getResource("bucket", "object", ""), Equals, "/bucket/object")
+	c.Assert(um.getResource("", "object", ""), Equals, "/")
+
+	um.Init("https://docs.github.com:8080", false, true)
+	c.Assert(um.Type, Equals, urlTypeAliyun)
+	c.Assert(um.Scheme, Equals, "https")
+	c.Assert(um.NetLoc, Equals, "docs.github.com:8080")
+
+	um.Init("127.0.0.1", false, true)
+	c.Assert(um.Type, Equals, urlTypeIP)
+	c.Assert(um.Scheme, Equals, "http")
+	c.Assert(um.NetLoc, Equals, "127.0.0.1")
+
+	um.Init("http://127.0.0.1", false, false)
+	c.Assert(um.Type, Equals, urlTypeIP)
+	c.Assert(um.Scheme, Equals, "http")
+	c.Assert(um.NetLoc, Equals, "127.0.0.1")
+
+	um.Init("https://127.0.0.1:8080", false, false)
+	c.Assert(um.Type, Equals, urlTypeIP)
+	c.Assert(um.Scheme, Equals, "https")
+	c.Assert(um.NetLoc, Equals, "127.0.0.1:8080")
+}
+
+// TestAuth signs a hand-built PUT request and logs the resulting
+// Authorization header. This is a smoke test only: it asserts nothing
+// about the signature value.
+func (s *OssConnSuite) TestAuth(c *C) {
+	endpoint := "https://github.com/"
+	cfg := getDefaultOssConfig()
+	um := urlMaker{}
+	um.Init(endpoint, false, false)
+	conn := Conn{cfg, &um}
+	uri := um.getURL("bucket", "object", "")
+	req := &http.Request{
+		Method:     "PUT",
+		URL:        uri,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       uri.Host,
+	}
+
+	req.Header.Set("Content-Type", "text/html")
+	req.Header.Set("Date", "Thu, 17 Nov 2005 18:49:58 GMT")
+	req.Header.Set("Host", endpoint)
+	req.Header.Set("X-OSS-Meta-Your", "your")
+	req.Header.Set("X-OSS-Meta-Author", "foo@bar.com")
+	req.Header.Set("X-OSS-Magic", "abracadabra")
+	req.Header.Set("Content-Md5", "ODBGOERFMDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=")
+
+	conn.signHeader(req, um.getResource("bucket", "object", ""))
+	testLogger.Println("AUTHORIZATION:", req.Header.Get(HTTPHeaderAuthorization))
+}
+
+// TestConnToolFunc covers the small connection helpers: checkRespCode,
+// serviceErrFromXML and the UnexpectedStatusCodeError accessors.
+func (s *OssConnSuite) TestConnToolFunc(c *C) {
+	err := checkRespCode(202, []int{})
+	c.Assert(err, NotNil)
+
+	err = checkRespCode(202, []int{404})
+	c.Assert(err, NotNil)
+
+	err = checkRespCode(202, []int{202, 404})
+	c.Assert(err, IsNil)
+
+	// Empty and non-XML bodies must fail to unmarshal and leave the
+	// ServiceError unstamped (StatusCode stays zero).
+	srvErr, err := serviceErrFromXML([]byte(""), 312, "")
+	c.Assert(err, NotNil)
+	c.Assert(srvErr.StatusCode, Equals, 0)
+
+	srvErr, err = serviceErrFromXML([]byte("ABC"), 312, "")
+	c.Assert(err, NotNil)
+	c.Assert(srvErr.StatusCode, Equals, 0)
+
+	srvErr, err = serviceErrFromXML([]byte("<Error></Error>"), 312, "")
+	c.Assert(err, IsNil)
+	c.Assert(srvErr.StatusCode, Equals, 312)
+
+	unexpect := UnexpectedStatusCodeError{[]int{200}, 202}
+	c.Assert(len(unexpect.Error()) > 0, Equals, true)
+	c.Assert(unexpect.Got(), Equals, 202)
+}

+ 82 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go

@@ -0,0 +1,82 @@
+package oss
+
+// ACLType is the access-control level of a bucket or object.
+type ACLType string
+
+const (
+	// ACLPrivate grants the owner read/write; everyone else is denied.
+	ACLPrivate ACLType = "private"
+
+	// ACLPublicRead grants public read and owner-only write.
+	ACLPublicRead ACLType = "public-read"
+
+	// ACLPublicReadWrite grants public read and public write.
+	ACLPublicReadWrite ACLType = "public-read-write"
+
+	// ACLDefault applies to objects only (not buckets): inherit the
+	// bucket's ACL.
+	ACLDefault ACLType = "default"
+)
+
+// MetadataDirectiveType controls whether a copied object keeps the source
+// object's metadata.
+type MetadataDirectiveType string
+
+const (
+	// MetaCopy makes the destination object reuse the source metadata.
+	MetaCopy MetadataDirectiveType = "COPY"
+
+	// MetaReplace gives the destination object caller-supplied metadata.
+	MetaReplace MetadataDirectiveType = "REPLACE"
+)
+
+// HTTP header names used by the SDK (standard headers plus X-Oss-*).
+const (
+	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
+	HTTPHeaderAuthorization             = "Authorization"
+	HTTPHeaderCacheControl              = "Cache-Control"
+	HTTPHeaderContentDisposition        = "Content-Disposition"
+	HTTPHeaderContentEncoding           = "Content-Encoding"
+	HTTPHeaderContentLength             = "Content-Length"
+	HTTPHeaderContentMD5                = "Content-MD5"
+	HTTPHeaderContentType               = "Content-Type"
+	HTTPHeaderContentLanguage           = "Content-Language"
+	HTTPHeaderDate                      = "Date"
+	HTTPHeaderEtag                      = "ETag"
+	HTTPHeaderExpires                   = "Expires"
+	HTTPHeaderHost                      = "Host"
+	HTTPHeaderLastModified              = "Last-Modified"
+	HTTPHeaderRange                     = "Range"
+	HTTPHeaderLocation                  = "Location"
+	HTTPHeaderOrigin                    = "Origin"
+	HTTPHeaderServer                    = "Server"
+	HTTPHeaderUserAgent                 = "User-Agent"
+	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
+	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
+	HTTPHeaderIfMatch                   = "If-Match"
+	HTTPHeaderIfNoneMatch               = "If-None-Match"
+
+	HTTPHeaderOssACL                         = "X-Oss-Acl"
+	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
+	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
+	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
+	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
+	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
+	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
+	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
+	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
+	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
+	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
+	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
+	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
+	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
+	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
+)
+
+// Miscellaneous limits and defaults.
+const (
+	MaxPartSize = 5 * 1024 * 1024 * 1024 // maximum multipart part size, 5GB
+	MinPartSize = 100 * 1024             // minimum multipart part size, 100KB
+
+	TempFilePrefix = "oss-go-temp-" // prefix for temporary spool files
+
+	Version = "1.2.0" // Go SDK version
+)

+ 44 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go

@@ -0,0 +1,44 @@
+package oss
+
+import (
+	"hash"
+	"hash/crc64"
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+	crc uint64
+	tab *crc64.Table
+}
+
+// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum
+// using the polynomial represented by the Table.
+func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
+
+// Size returns the number of bytes Sum will return.
+func (d *digest) Size() int { return crc64.Size }
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (d *digest) BlockSize() int { return 1 }
+
+// Reset resets the Hash to its initial state.
+func (d *digest) Reset() { d.crc = 0 }
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (d *digest) Write(p []byte) (n int, err error) {
+	d.crc = crc64.Update(d.crc, d.tab, p)
+	return len(p), nil
+}
+
+// Sum64 returns crc64 value.
+func (d *digest) Sum64() uint64 { return d.crc }
+
+// Sum returns hash value.
+func (d *digest) Sum(in []byte) []byte {
+	s := d.Sum64()
+	return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}

+ 398 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/crc_test.go

@@ -0,0 +1,398 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"hash/crc64"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"time"
+
+	. "gopkg.in/check.v1"
+)
+
// OssCrcSuite groups the CRC-related integration tests and holds the
// client/bucket shared by all tests in the suite.
type OssCrcSuite struct {
	client *Client // OSS client created in SetUpSuite
	bucket *Bucket // bucket under test, created in SetUpSuite
}

// Register the suite with gocheck.
var _ = Suite(&OssCrcSuite{})
+
// Run once when the suite starts running
func (s *OssCrcSuite) SetUpSuite(c *C) {
	client, err := New(endpoint, accessID, accessKey)
	c.Assert(err, IsNil)
	s.client = client

	// Error deliberately ignored — presumably the bucket may already
	// exist from a previous run; NOTE(review): confirm this is intended.
	s.client.CreateBucket(bucketName)
	time.Sleep(5 * time.Second)

	bucket, err := s.client.Bucket(bucketName)
	c.Assert(err, IsNil)
	s.bucket = bucket

	testLogger.Println("test crc started")
}
+
// Run once after all tests in the suite have finished running.
// (The previous comment incorrectly described the SetUpTest lifecycle.)
func (s *OssCrcSuite) TearDownSuite(c *C) {
	// Abort all outstanding multipart uploads.
	lmur, err := s.bucket.ListMultipartUploads()
	c.Assert(err, IsNil)

	for _, upload := range lmur.Uploads {
		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
			Key: upload.Key, UploadID: upload.UploadID}
		err = s.bucket.AbortMultipartUpload(imur)
		c.Assert(err, IsNil)
	}

	// Delete all objects left in the bucket.
	lor, err := s.bucket.ListObjects()
	c.Assert(err, IsNil)

	for _, object := range lor.Objects {
		err = s.bucket.DeleteObject(object.Key)
		c.Assert(err, IsNil)
	}

	testLogger.Println("test crc completed")
}
+
// Run before each test or benchmark starts running.
// (The previous comment incorrectly said "after each test".)
func (s *OssCrcSuite) SetUpTest(c *C) {
	err := removeTempFiles("../oss", ".jpg")
	c.Assert(err, IsNil)
}
+
// Run after each test or benchmark finishes running.
// (The previous comment incorrectly described the TearDownSuite lifecycle.)
func (s *OssCrcSuite) TearDownTest(c *C) {
	err := removeTempFiles("../oss", ".jpg")
	c.Assert(err, IsNil)
}
+
// TestCRCGolden verifies the SDK's CRC-64 implementation against the
// standard library's golden values (crc64.ISO polynomial, zero init).
func (s *OssCrcSuite) TestCRCGolden(c *C) {
	type crcTest struct {
		out uint64 // expected CRC-64 checksum
		in  string // input string
	}

	var crcGolden = []crcTest{
		{0x0, ""},
		{0x3420000000000000, "a"},
		{0x36c4200000000000, "ab"},
		{0x3776c42000000000, "abc"},
		{0x336776c420000000, "abcd"},
		{0x32d36776c4200000, "abcde"},
		{0x3002d36776c42000, "abcdef"},
		{0x31b002d36776c420, "abcdefg"},
		{0xe21b002d36776c4, "abcdefgh"},
		{0x8b6e21b002d36776, "abcdefghi"},
		{0x7f5b6e21b002d367, "abcdefghij"},
		{0x8ec0e7c835bf9cdf, "Discard medicine more than two years old."},
		{0xc7db1759e2be5ab4, "He who has a shady past knows that nice guys finish last."},
		{0xfbf9d9603a6fa020, "I wouldn't marry him with a ten foot pole."},
		{0xeafc4211a6daa0ef, "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
		{0x3e05b21c7a4dc4da, "The days of the digital watch are numbered.  -Tom Stoppard"},
		{0x5255866ad6ef28a6, "Nepal premier won't resign."},
		{0x8a79895be1e9c361, "For every action there is an equal and opposite government program."},
		{0x8878963a649d4916, "His money is twice tainted: 'taint yours and 'taint mine."},
		{0xa7b9d53ea87eb82f, "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
		{0xdb6805c0966a2f9c, "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
		{0xf3553c65dacdadd2, "size:  a.out:  bad magic"},
		{0x9d5e034087a676b9, "The major problem is with sendmail.  -Mark Horton"},
		{0xa6db2d7f8da96417, "Give me a rock, paper and scissors and I will move the world.  CCFestoon"},
		{0x325e00cd2fe819f9, "If the enemy is within range, then so are you."},
		{0x88c6600ce58ae4c6, "It's well we cannot hear the screams/That we create in others' dreams."},
		{0x28c4a3f3b769e078, "You remind me of a TV show, but that's all right: I watch it anyway."},
		{0xa698a34c9d9f1dca, "C is as portable as Stonehedge!!"},
		{0xf6c1e2a8c26c5cfc, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
		{0xd402559dfe9b70c, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction.  Lewis-Randall Rule"},
		{0xdb6efff26aa94946, "How can you write a big system without C++?  -Paul Glick"},
	}

	var tab = crc64.MakeTable(crc64.ISO)

	for i := 0; i < len(crcGolden); i++ {
		golden := crcGolden[i]
		crc := NewCRC(tab, 0)
		io.WriteString(crc, golden.in)
		sum := crc.Sum64()

		c.Assert(sum, Equals, golden.out)
	}
}
+
+// TestEnableCRCAndMD5 开启MD5和CRC校验
+func (s *OssCrcSuite) TestEnableCRCAndMD5(c *C) {
+	objectName := objectNamePrefix + "tecam"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFileName := "BingWallpaper-2015-11-07-2.jpg"
+	objectValue := "空山新雨后,天气晚来秋。明月松间照,清泉石上流。竹喧归浣女,莲动下渔舟。随意春芳歇,王孙自可留。"
+
+	client, err := New(endpoint, accessID, accessKey, EnableCRC(true), EnableMD5(true), MD5ThresholdCalcInMemory(200*1024))
+	c.Assert(err, IsNil)
+	bucket, err := client.Bucket(bucketName)
+	c.Assert(err, IsNil)
+
+	// PutObject
+	err = bucket.PutObject(objectName, strings.NewReader(objectValue))
+	c.Assert(err, IsNil)
+
+	// GetObject
+	body, err := bucket.GetObject(objectName)
+	c.Assert(err, IsNil)
+	_, err = ioutil.ReadAll(body)
+	c.Assert(err, IsNil)
+	body.Close()
+
+	// GetObjectWithCRC
+	getResult, err := bucket.DoGetObject(&GetObjectRequest{objectName}, nil)
+	c.Assert(err, IsNil)
+	str, err := readBody(getResult.Response.Body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+	c.Assert(getResult.ClientCRC.Sum64(), Equals, getResult.ServerCRC)
+
+	// PutObjectFromFile
+	err = bucket.PutObjectFromFile(objectName, fileName)
+	c.Assert(err, IsNil)
+
+	// GetObjectToFile
+	err = bucket.GetObjectToFile(objectName, newFileName)
+	c.Assert(err, IsNil)
+	eq, err := compareFiles(fileName, newFileName)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// DeleteObject
+	err = bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// AppendObject
+	var nextPos int64
+	nextPos, err = bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos)
+	c.Assert(err, IsNil)
+	nextPos, err = bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos)
+	c.Assert(err, IsNil)
+
+	err = bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	request := &AppendObjectRequest{
+		ObjectKey: objectName,
+		Reader:    strings.NewReader(objectValue),
+		Position:  0,
+	}
+	appendResult, err := bucket.DoAppendObject(request, []Option{InitCRC(0)})
+	c.Assert(err, IsNil)
+	request.Position = appendResult.NextPosition
+	appendResult, err = bucket.DoAppendObject(request, []Option{InitCRC(appendResult.CRC)})
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	//	MultipartUpload
+	chunks, err := SplitFileByPartSize(fileName, 100*1024)
+	imurUpload, err := bucket.InitiateMultipartUpload(objectName)
+	c.Assert(err, IsNil)
+	var partsUpload []UploadPart
+
+	for _, chunk := range chunks {
+		part, err := bucket.UploadPartFromFile(imurUpload, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		partsUpload = append(partsUpload, part)
+	}
+
+	_, err = bucket.CompleteMultipartUpload(imurUpload, partsUpload)
+	c.Assert(err, IsNil)
+
+	// Check MultipartUpload
+	err = bucket.GetObjectToFile(objectName, newFileName)
+	c.Assert(err, IsNil)
+	eq, err = compareFiles(fileName, newFileName)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// DeleteObjects
+	_, err = bucket.DeleteObjects([]string{objectName})
+	c.Assert(err, IsNil)
+}
+
+// TestDisableCRCAndMD5 关闭MD5和CRC校验
+func (s *OssCrcSuite) TestDisableCRCAndMD5(c *C) {
+	objectName := objectNamePrefix + "tdcam"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFileName := "BingWallpaper-2015-11-07-3.jpg"
+	objectValue := "中岁颇好道,晚家南山陲。兴来每独往,胜事空自知。行到水穷处,坐看云起时。偶然值林叟,谈笑无还期。"
+
+	client, err := New(endpoint, accessID, accessKey, EnableCRC(false), EnableMD5(false))
+	c.Assert(err, IsNil)
+	bucket, err := client.Bucket(bucketName)
+	c.Assert(err, IsNil)
+
+	// PutObject
+	err = bucket.PutObject(objectName, strings.NewReader(objectValue))
+	c.Assert(err, IsNil)
+
+	// GetObject
+	body, err := bucket.GetObject(objectName)
+	c.Assert(err, IsNil)
+	_, err = ioutil.ReadAll(body)
+	c.Assert(err, IsNil)
+	body.Close()
+
+	// GetObjectWithCRC
+	getResult, err := bucket.DoGetObject(&GetObjectRequest{objectName}, nil)
+	c.Assert(err, IsNil)
+	str, err := readBody(getResult.Response.Body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	// PutObjectFromFile
+	err = bucket.PutObjectFromFile(objectName, fileName)
+	c.Assert(err, IsNil)
+
+	// GetObjectToFile
+	err = bucket.GetObjectToFile(objectName, newFileName)
+	c.Assert(err, IsNil)
+	eq, err := compareFiles(fileName, newFileName)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// DeleteObject
+	err = bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// AppendObject
+	var nextPos int64
+	nextPos, err = bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos)
+	c.Assert(err, IsNil)
+	nextPos, err = bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos)
+	c.Assert(err, IsNil)
+
+	err = bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	request := &AppendObjectRequest{
+		ObjectKey: objectName,
+		Reader:    strings.NewReader(objectValue),
+		Position:  0,
+	}
+	appendResult, err := bucket.DoAppendObject(request, []Option{InitCRC(0)})
+	c.Assert(err, IsNil)
+	request.Position = appendResult.NextPosition
+	appendResult, err = bucket.DoAppendObject(request, []Option{InitCRC(appendResult.CRC)})
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	//	MultipartUpload
+	chunks, err := SplitFileByPartSize(fileName, 100*1024)
+	imurUpload, err := bucket.InitiateMultipartUpload(objectName)
+	c.Assert(err, IsNil)
+	var partsUpload []UploadPart
+
+	for _, chunk := range chunks {
+		part, err := bucket.UploadPartFromFile(imurUpload, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		partsUpload = append(partsUpload, part)
+	}
+
+	_, err = bucket.CompleteMultipartUpload(imurUpload, partsUpload)
+	c.Assert(err, IsNil)
+
+	// Check MultipartUpload
+	err = bucket.GetObjectToFile(objectName, newFileName)
+	c.Assert(err, IsNil)
+	eq, err = compareFiles(fileName, newFileName)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// DeleteObjects
+	_, err = bucket.DeleteObjects([]string{objectName})
+	c.Assert(err, IsNil)
+}
+
+// TestSpecifyContentMD5 指定MD5
+func (s *OssCrcSuite) TestSpecifyContentMD5(c *C) {
+	objectName := objectNamePrefix + "tdcam"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	objectValue := "积雨空林烟火迟,蒸藜炊黍饷东菑。漠漠水田飞白鹭,阴阴夏木啭黄鹂。山中习静观朝槿,松下清斋折露葵。野老与人争席罢,海鸥何事更相疑。"
+
+	mh := md5.Sum([]byte(objectValue))
+	md5B64 := base64.StdEncoding.EncodeToString(mh[:])
+
+	// PutObject
+	err := s.bucket.PutObject(objectName, strings.NewReader(objectValue), ContentMD5(md5B64))
+	c.Assert(err, IsNil)
+
+	// PutObjectFromFile
+	file, err := os.Open(fileName)
+	md5 := md5.New()
+	io.Copy(md5, file)
+	mdHex := base64.StdEncoding.EncodeToString(md5.Sum(nil)[:])
+	err = s.bucket.PutObjectFromFile(objectName, fileName, ContentMD5(mdHex))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// AppendObject
+	var nextPos int64
+	nextPos, err = s.bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos)
+	c.Assert(err, IsNil)
+	nextPos, err = s.bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos)
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	request := &AppendObjectRequest{
+		ObjectKey: objectName,
+		Reader:    strings.NewReader(objectValue),
+		Position:  0,
+	}
+	appendResult, err := s.bucket.DoAppendObject(request, []Option{InitCRC(0)})
+	c.Assert(err, IsNil)
+	request.Position = appendResult.NextPosition
+	appendResult, err = s.bucket.DoAppendObject(request, []Option{InitCRC(appendResult.CRC)})
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	//	MultipartUpload
+	imurUpload, err := s.bucket.InitiateMultipartUpload(objectName)
+	c.Assert(err, IsNil)
+
+	var partsUpload []UploadPart
+	part, err := s.bucket.UploadPart(imurUpload, strings.NewReader(objectValue), (int64)(len([]byte(objectValue))), 1)
+	c.Assert(err, IsNil)
+	partsUpload = append(partsUpload, part)
+
+	_, err = s.bucket.CompleteMultipartUpload(imurUpload, partsUpload)
+	c.Assert(err, IsNil)
+
+	// DeleteObject
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
// TestAppendObjectNegative verifies that appending with a wrong InitCRC
// is rejected with a CRC error.
// (The previous comment incorrectly named this TestCopyObjectToOrFromNegative.)
func (s *OssCrcSuite) TestAppendObjectNegative(c *C) {
	objectName := objectNamePrefix + "taoncrc"
	objectValue := "空山不见人,但闻人语响。返影入深林,复照青苔上。"

	nextPos, err := s.bucket.AppendObject(objectName, strings.NewReader(objectValue), 0, InitCRC(0))
	c.Assert(err, IsNil)

	// Second append reuses InitCRC(0) instead of the running CRC, so the
	// client-side check must fail.
	nextPos, err = s.bucket.AppendObject(objectName, strings.NewReader(objectValue), nextPos, InitCRC(0))
	c.Assert(err, NotNil)
	c.Assert(strings.HasPrefix(err.Error(), "oss: the crc"), Equals, true)
}

+ 399 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go

@@ -0,0 +1,399 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"strconv"
+)
+
//
// DownloadFile downloads an object to a local file in parts, optionally
// concurrently and with checkpoint/resume support.
//
// objectKey  the object key to download.
// filePath   the local file the object is written to.
// partSize   size of each download part in bytes, e.g. 100 * 1024 for 100KB.
// options    constraints on the object; see GetObject.
//
// error      nil on success, otherwise the error information.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < 1 || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (1, 5GB]")
	}

	// Checkpoint configuration comes from the options (Checkpoint(...)).
	cpConf, err := getCpConfig(options, filePath)
	if err != nil {
		return err
	}

	routines := getRoutines(options)

	if cpConf.IsEnable {
		return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
	}

	return bucket.downloadFile(objectKey, filePath, partSize, options, routines)
}
+
// ----- concurrent download without checkpoint -----

// downloadWorkerArg bundles the parameters shared by all download workers.
type downloadWorkerArg struct {
	bucket   *Bucket          // bucket the object is read from
	key      string           // object key
	filePath string           // local file the parts are written into
	options  []Option         // per-request options (e.g. conditional headers)
	hook     downloadPartHook // test hook invoked before each part
}
+
// downloadPartHook is a hook for testing: it runs before each part is
// downloaded and may inject an error.
type downloadPartHook func(part downloadPart) error

// downloadPartHooker is the active hook; tests swap it out.
var downloadPartHooker downloadPartHook = defaultDownloadPartHook

// defaultDownloadPartHook is a no-op.
func defaultDownloadPartHook(part downloadPart) error {
	return nil
}
+
+// 工作协程
+func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <- chan bool) {
+	for part := range jobs {
+		if err := arg.hook(part); err != nil {
+			failed <- err
+			break
+		}
+
+		opt := Range(part.Start, part.End)
+		opts := append(arg.options, opt)
+		rd, err := arg.bucket.GetObject(arg.key, opts...)
+		if err != nil {
+			failed <- err
+			break
+		}
+		defer rd.Close()
+
+		select {
+			case <-die:
+				return
+			default:
+		}
+
+		fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, 0660)
+		if err != nil {
+			failed <- err
+			break
+		}
+		defer fd.Close()
+
+		_, err = fd.Seek(part.Start, os.SEEK_SET)
+		if err != nil {
+			failed <- err
+			break
+		}
+
+		_, err = io.Copy(fd, rd)
+		if err != nil {
+			failed <- err
+			break
+		}
+
+		results <- part
+	}
+}
+
+// 调度协程
+func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
+	for _, part := range parts {
+		jobs <- part
+	}
+	close(jobs)
+}
+
// downloadPart describes one byte range of the object to download.
type downloadPart struct {
	Index int   // part number, starting at 0
	Start int64 // start offset of the part (inclusive)
	End   int64 // end offset of the part
}
+
+// 文件分片
+func getDownloadParts(bucket *Bucket, objectKey string, partSize int64) ([]downloadPart, error) {
+	meta, err := bucket.GetObjectDetailedMeta(objectKey)
+	if err != nil {
+		return nil, err
+	}
+
+	parts := []downloadPart{}
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	part := downloadPart{}
+	i := 0
+	for offset := int64(0); offset < objectSize; offset += partSize {
+		part.Index = i
+		part.Start = offset
+		part.End = GetPartEnd(offset, objectSize, partSize)
+		parts = append(parts, part)
+		i++
+	}
+	return parts, nil
+}
+
+// 并发无断点续传的下载
+func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
+	// 如果文件不存在则创建,存在不清空,下载分片会重写文件内容
+	fd, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0660)
+	if err != nil {
+		return err
+	}
+	fd.Close()
+
+	// 分割文件
+	parts, err := getDownloadParts(&bucket, objectKey, partSize)
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan downloadPart, len(parts))
+	results := make(chan downloadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	// 启动工作协程
+	arg := downloadWorkerArg{&bucket, objectKey, filePath, options, downloadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go downloadWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// 并发上传分片
+	go downloadScheduler(jobs, parts)
+
+	// 等待分片下载完成
+	completed := 0
+	ps := make([]downloadPart, len(parts))
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			ps[part.Index] = part
+		case err := <-failed:
+			close(die)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	return nil
+}
+
// ----- concurrent download with checkpoint/resume -----

// downloadCpMagic identifies a valid download checkpoint file.
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"

// downloadCheckpoint is the persisted state of a resumable download.
type downloadCheckpoint struct {
	Magic    string         // magic constant identifying the cp format
	MD5      string         // MD5 of the checkpoint content (integrity check)
	FilePath string         // local file being written
	Object   string         // object key
	ObjStat  objectStat     // object state when the download started
	Parts    []downloadPart // all parts of the download
	PartStat []bool         // whether each part has finished downloading
}
+
// objectStat captures the remote object's identity so a checkpoint can
// detect that the object changed between runs.
type objectStat struct {
	Size         int64  // object size in bytes
	LastModified string // last-modified timestamp
	Etag         string // etag
}
+
+// CP数据是否有效,CP有效且Object没有更新时有效
+func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
+	// 比较CP的Magic及MD5
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	// 确认object没有更新
+	meta, err := bucket.GetObjectDetailedMeta(objectKey)
+	if err != nil {
+		return false, err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return false, err
+	}
+
+	// 比较Object的大小/最后修改时间/etag
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// 从文件中load
+func (cp *downloadCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
// dump persists the checkpoint to filePath, embedding an MD5 of its own
// content (computed with the MD5 field blanked) so load/isValid can
// detect corruption.
func (cp *downloadCheckpoint) dump(filePath string) error {
	bcp := *cp

	// Compute the content MD5 over the checkpoint without its MD5 field.
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64

	// Serialize the checkpoint including the MD5.
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}

	// Write it to disk.
	return ioutil.WriteFile(filePath, js, 0644)
}
+
+// 未完成的分片
+func (cp downloadCheckpoint) todoParts() []downloadPart {
+	dps := []downloadPart{}
+	for i, ps := range cp.PartStat {
+		if !ps {
+			dps = append(dps, cp.Parts[i])
+		}
+	}
+	return dps
+}
+
+// 初始化下载任务
+func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64) error {
+	// cp
+	cp.Magic = downloadCpMagic
+	cp.FilePath = filePath
+	cp.Object = objectKey
+
+	// object
+	meta, err := bucket.GetObjectDetailedMeta(objectKey)
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	// parts
+	cp.Parts, err = getDownloadParts(bucket, objectKey, partSize)
+	if err != nil {
+		return err
+	}
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+	}
+
+	return nil
+}
+
+func (cp *downloadCheckpoint) complete(cpFilePath string) error {
+	os.Remove(cpFilePath)
+    return nil
+}
+
// downloadFileWithCp downloads the object concurrently with
// checkpoint/resume support: progress is persisted to cpFilePath after
// every finished part, so an interrupted download restarts with only the
// remaining parts.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
	// Load the checkpoint; a corrupt/unreadable file is discarded.
	dcp := downloadCheckpoint{}
	err := dcp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// If loading failed or the data no longer matches the remote object,
	// start the download from scratch.
	valid, err := dcp.isValid(&bucket, objectKey)
	if err != nil || !valid {
		if err = dcp.prepare(&bucket, objectKey, filePath, partSize); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Create the file if it does not exist; do not truncate an existing
	// one — the part downloads rewrite its content in place.
	fd, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0660)
	if err != nil {
		return err
	}
	fd.Close()

	// Only the parts not yet finished are scheduled.
	parts := dcp.todoParts()
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	// Start the worker goroutines.
	arg := downloadWorkerArg{&bucket, objectKey, filePath, options, downloadPartHooker}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Feed the parts to the workers.
	go downloadScheduler(jobs, parts)

	// Wait for all parts, persisting the checkpoint after each one.
	// NOTE(review): the dump error is ignored here — a failed dump only
	// costs resume granularity, not correctness; confirm this is intended.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			dcp.PartStat[part.Index] = true
			dcp.dump(cpFilePath)
		case err := <-failed:
			close(die)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	return dcp.complete(cpFilePath)
}

+ 352 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/download_test.go

@@ -0,0 +1,352 @@
+package oss
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	. "gopkg.in/check.v1"
+)
+
// OssDownloadSuite groups the DownloadFile integration tests and holds
// the client/bucket shared by them.
type OssDownloadSuite struct {
	client *Client // OSS client created in SetUpSuite
	bucket *Bucket // bucket under test, created in SetUpSuite
}

// Register the suite with gocheck.
var _ = Suite(&OssDownloadSuite{})
+
// Run once when the suite starts running
func (s *OssDownloadSuite) SetUpSuite(c *C) {
	client, err := New(endpoint, accessID, accessKey)
	c.Assert(err, IsNil)
	s.client = client

	// Error deliberately ignored — presumably the bucket may already
	// exist from a previous run; NOTE(review): confirm this is intended.
	s.client.CreateBucket(bucketName)
	time.Sleep(5 * time.Second)

	bucket, err := s.client.Bucket(bucketName)
	c.Assert(err, IsNil)
	s.bucket = bucket

	testLogger.Println("test download started")
}
+
// Run once after all tests in the suite have finished running.
// (The previous comment incorrectly described the SetUpTest lifecycle.)
func (s *OssDownloadSuite) TearDownSuite(c *C) {
	// Abort all outstanding multipart uploads.
	lmur, err := s.bucket.ListMultipartUploads()
	c.Assert(err, IsNil)

	for _, upload := range lmur.Uploads {
		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
			Key: upload.Key, UploadID: upload.UploadID}
		err = s.bucket.AbortMultipartUpload(imur)
		c.Assert(err, IsNil)
	}

	// Delete all objects left in the bucket.
	lor, err := s.bucket.ListObjects()
	c.Assert(err, IsNil)

	for _, object := range lor.Objects {
		err = s.bucket.DeleteObject(object.Key)
		c.Assert(err, IsNil)
	}

	testLogger.Println("test download completed")
}
+
// Run before each test or benchmark starts running.
// (The previous comment incorrectly said "after each test".)
func (s *OssDownloadSuite) SetUpTest(c *C) {
	err := removeTempFiles("../oss", ".jpg")
	c.Assert(err, IsNil)
}
+
// Run after each test or benchmark finishes running.
// (The previous comment incorrectly described the TearDownSuite lifecycle.)
func (s *OssDownloadSuite) TearDownTest(c *C) {
	err := removeTempFiles("../oss", ".jpg")
	c.Assert(err, IsNil)
}
+
// TestDownloadRoutineWithoutRecovery tests concurrent download without
// checkpoint/resume, with worker counts below, equal to, and above the
// part count. (The previous comment misnamed this TestUploadRoutine...)
func (s *OssDownloadSuite) TestDownloadRoutineWithoutRecovery(c *C) {
	objectName := objectNamePrefix + "tdrwr"
	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
	newFile := "down-new-file.jpg"

	// Upload the fixture.
	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
	c.Assert(err, IsNil)

	// Download with the default routine count.
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024)
	c.Assert(err, IsNil)

	// check
	eq, err := compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// 2 routines, fewer than the 5 parts.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(2))
	c.Assert(err, IsNil)

	// check
	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// 5 routines, equal to the 5 parts.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(5))
	c.Assert(err, IsNil)

	// check
	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// 10 routines, more than the 5 parts.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10))
	c.Assert(err, IsNil)

	// check
	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	err = s.bucket.DeleteObject(objectName)
	c.Assert(err, IsNil)
}
+
// DownErrorHooker is a downloadPart hook that fails part index 4,
// simulating a mid-download error so resume paths can be tested.
// (The previous comment misnamed it ErrorHooker.)
func DownErrorHooker(part downloadPart) error {
	if part.Index == 4 {
		time.Sleep(time.Second)
		return fmt.Errorf("ErrorHooker")
	}
	return nil
}
+
// TestDownloadRoutineWithRecovery tests concurrent download with
// checkpoint/resume: a hooked failure leaves a checkpoint behind, and a
// second DownloadFile call finishes only the remaining part.
func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
	objectName := objectNamePrefix + "tdrtr"
	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
	newFile := "down-new-file-2.jpg"

	// Upload the fixture.
	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
	c.Assert(err, IsNil)

	// Download with the default checkpoint path; the hook fails part 4.
	downloadPartHooker = DownErrorHooker
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "ErrorHooker")
	downloadPartHooker = defaultDownloadPartHook

	// The checkpoint must record 4 of 5 parts done.
	dcp := downloadCheckpoint{}
	err = dcp.load(newFile + ".cp")
	c.Assert(err, IsNil)
	c.Assert(dcp.Magic, Equals, downloadCpMagic)
	c.Assert(len(dcp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
	c.Assert(dcp.FilePath, Equals, newFile)
	c.Assert(dcp.ObjStat.Size, Equals, int64(482048))
	c.Assert(len(dcp.ObjStat.LastModified), Equals, len("2015-12-17 18:43:03 +0800 CST"))
	c.Assert(dcp.ObjStat.Etag, Equals, "\"2351E662233817A7AE974D8C5B0876DD-5\"")
	c.Assert(dcp.Object, Equals, objectName)
	c.Assert(len(dcp.Parts), Equals, 5)
	c.Assert(len(dcp.todoParts()), Equals, 1)

	// Resume; the checkpoint file must be gone afterwards.
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
	c.Assert(err, IsNil)

	err = dcp.load(newFile + ".cp")
	c.Assert(err, NotNil)

	eq, err := compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// Same scenario with an explicit checkpoint path.
	os.Remove(newFile)
	downloadPartHooker = DownErrorHooker
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "ErrorHooker")
	downloadPartHooker = defaultDownloadPartHook

	// check
	dcp = downloadCheckpoint{}
	err = dcp.load(objectName + ".cp")
	c.Assert(err, IsNil)
	c.Assert(dcp.Magic, Equals, downloadCpMagic)
	c.Assert(len(dcp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
	c.Assert(dcp.FilePath, Equals, newFile)
	c.Assert(dcp.ObjStat.Size, Equals, int64(482048))
	c.Assert(len(dcp.ObjStat.LastModified), Equals, len("2015-12-17 18:43:03 +0800 CST"))
	c.Assert(dcp.ObjStat.Etag, Equals, "\"2351E662233817A7AE974D8C5B0876DD-5\"")
	c.Assert(dcp.Object, Equals, objectName)
	c.Assert(len(dcp.Parts), Equals, 5)
	c.Assert(len(dcp.todoParts()), Equals, 1)

	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
	c.Assert(err, IsNil)

	err = dcp.load(objectName + ".cp")
	c.Assert(err, NotNil)

	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// Single uninterrupted checkpointed download.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
	c.Assert(err, IsNil)

	err = dcp.load(newFile + ".cp")
	c.Assert(err, NotNil)

	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// Single uninterrupted checkpointed download with extra routines.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10), Checkpoint(true, ""))
	c.Assert(err, IsNil)

	err = dcp.load(newFile + ".cp")
	c.Assert(err, NotNil)

	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	err = s.bucket.DeleteObject(objectName)
	c.Assert(err, IsNil)
}
+
// TestDownloadOption tests DownloadFile with conditional-request options
// (IfMatch/IfNoneMatch), with and without checkpointing.
func (s *OssDownloadSuite) TestDownloadOption(c *C) {
	objectName := objectNamePrefix + "tdmo"
	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
	newFile := "down-new-file-3.jpg"

	// Upload the fixture.
	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
	c.Assert(err, IsNil)

	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
	c.Assert(err, IsNil)

	// IfMatch with the current etag must succeed.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), IfMatch(meta.Get("Etag")))
	c.Assert(err, IsNil)

	eq, err := compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// IfNoneMatch with the current etag must fail.
	os.Remove(newFile)
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), IfNoneMatch(meta.Get("Etag")))
	c.Assert(err, NotNil)

	// IfMatch, checkpointed.
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), IfMatch(meta.Get("Etag")))
	c.Assert(err, IsNil)

	eq, err = compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)

	// IfNoneMatch, checkpointed.
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), IfNoneMatch(meta.Get("Etag")))
	c.Assert(err, NotNil)
}
+
// TestDownloadObjectChange: the remote object is replaced mid-download;
// the stale checkpoint must be discarded and the download restarted.
func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
	objectName := objectNamePrefix + "tdloc"
	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
	newFile := "down-new-file-4.jpg"

	// Upload the fixture.
	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
	c.Assert(err, IsNil)

	// Start a checkpointed download and force a failure on part 4.
	downloadPartHooker = DownErrorHooker
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "ErrorHooker")
	downloadPartHooker = defaultDownloadPartHook

	// Replace the object, invalidating the checkpoint.
	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
	c.Assert(err, IsNil)

	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
	c.Assert(err, IsNil)

	eq, err := compareFiles(fileName, newFile)
	c.Assert(err, IsNil)
	c.Assert(eq, Equals, true)
}
+
// TestDownloadNegative exercises DownloadFile error paths: worker
// failures, invalid local paths and out-of-range part sizes.
func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
	objectName := objectNamePrefix + "tdn"
	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
	newFile := "down-new-file-3.jpg"

	// Upload the fixture.
	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
	c.Assert(err, IsNil)

	// A worker error must surface as the DownloadFile error.
	downloadPartHooker = DownErrorHooker
	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(2))
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "ErrorHooker")
	downloadPartHooker = defaultDownloadPartHook

	// Invalid local file path (a directory).
	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2))
	c.Assert(err, NotNil)

	// Part size out of range.
	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2))
	c.Assert(err, NotNil)

	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Routines(2))
	c.Assert(err, NotNil)

	err = s.bucket.DeleteObject(objectName)
	c.Assert(err, IsNil)

	// Invalid local file path, checkpointed variants.
	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Checkpoint(true, ""))
	c.Assert(err, NotNil)

	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2), Checkpoint(true, ""))
	c.Assert(err, NotNil)

	// Part size out of range, checkpointed variants.
	err = s.bucket.DownloadFile(objectName, newFile, -1, Checkpoint(true, ""))
	c.Assert(err, NotNil)

	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2), Checkpoint(true, ""))
	c.Assert(err, NotNil)

	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Checkpoint(true, ""))
	c.Assert(err, NotNil)

	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
	c.Assert(err, NotNil)
}

+ 82 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go

@@ -0,0 +1,82 @@
+package oss
+
+import (
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// ServiceError contains fields of the error response from Oss Service REST API.
+type ServiceError struct {
+	XMLName    xml.Name `xml:"Error"`
+	Code       string   `xml:"Code"`      // OSS error code returned to the caller
+	Message    string   `xml:"Message"`   // detailed error message from OSS
+	RequestID  string   `xml:"RequestId"` // UUID uniquely identifying this request
+	HostID     string   `xml:"HostId"`    // identifies the OSS cluster that was accessed
+	RawMessage string   // raw (unparsed) response body from OSS
+	StatusCode int      // HTTP status code of the response
+}
+
+// Error implements the error interface, summarizing status code, OSS error
+// code/message and the request ID.
+func (e ServiceError) Error() string {
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID)
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor with an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int // HTTP status codes the caller would have accepted
+	got     int   // HTTP status code actually returned by OSS
+}
+
+// Error implements the error interface, listing the received status code and
+// every status code that would have been accepted.
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
+		got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by oss.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// checkRespCode reports nil when respCode is one of the allowed status
+// codes, and an UnexpectedStatusCodeError otherwise.
+func checkRespCode(respCode int, allowed []int) error {
+	for _, code := range allowed {
+		if code == respCode {
+			return nil
+		}
+	}
+	return UnexpectedStatusCodeError{allowed: allowed, got: respCode}
+}
+
+// CRCCheckError is returned when crc check is inconsistent between client and server
+type CRCCheckError struct {
+	clientCRC uint64 // CRC64 computed by the client
+	serverCRC uint64 // CRC64 computed by the server
+	operation string // upload operation, e.g. PutObject/AppendObject/UploadPart
+	requestID string // request ID of the failed operation
+}
+
+// Error implements the error interface, reporting both CRC64 values, the
+// operation name and the request ID.
+func (e CRCCheckError) Error() string {
+	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
+		e.operation, e.clientCRC, e.serverCRC, e.requestID)
+}
+
+// checkCRC compares the client- and server-side CRC64 values on resp and
+// returns a CRCCheckError when they differ; operation names the API call
+// for the error message.
+func checkCRC(resp *Response, operation string) error {
+	if resp.ClientCRC == resp.ServerCRC {
+		return nil
+	}
+	return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
+}

+ 245 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go

@@ -0,0 +1,245 @@
+package oss
+
+import (
+	"mime"
+	"path"
+	"strings"
+)
+
+var extToMimeType = map[string]string{
+	".xlsx":    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+	".xltx":    "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
+	".potx":    "application/vnd.openxmlformats-officedocument.presentationml.template",
+	".ppsx":    "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
+	".pptx":    "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+	".sldx":    "application/vnd.openxmlformats-officedocument.presentationml.slide",
+	".docx":    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+	".dotx":    "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
+	".xlam":    "application/vnd.ms-excel.addin.macroEnabled.12",
+	".xlsb":    "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
+	".apk":     "application/vnd.android.package-archive",
+	".hqx":     "application/mac-binhex40",
+	".cpt":     "application/mac-compactpro",
+	".doc":     "application/msword",
+	".ogg":     "application/ogg",
+	".pdf":     "application/pdf",
+	".rtf":     "text/rtf",
+	".mif":     "application/vnd.mif",
+	".xls":     "application/vnd.ms-excel",
+	".ppt":     "application/vnd.ms-powerpoint",
+	".odc":     "application/vnd.oasis.opendocument.chart",
+	".odb":     "application/vnd.oasis.opendocument.database",
+	".odf":     "application/vnd.oasis.opendocument.formula",
+	".odg":     "application/vnd.oasis.opendocument.graphics",
+	".otg":     "application/vnd.oasis.opendocument.graphics-template",
+	".odi":     "application/vnd.oasis.opendocument.image",
+	".odp":     "application/vnd.oasis.opendocument.presentation",
+	".otp":     "application/vnd.oasis.opendocument.presentation-template",
+	".ods":     "application/vnd.oasis.opendocument.spreadsheet",
+	".ots":     "application/vnd.oasis.opendocument.spreadsheet-template",
+	".odt":     "application/vnd.oasis.opendocument.text",
+	".odm":     "application/vnd.oasis.opendocument.text-master",
+	".ott":     "application/vnd.oasis.opendocument.text-template",
+	".oth":     "application/vnd.oasis.opendocument.text-web",
+	".sxw":     "application/vnd.sun.xml.writer",
+	".stw":     "application/vnd.sun.xml.writer.template",
+	".sxc":     "application/vnd.sun.xml.calc",
+	".stc":     "application/vnd.sun.xml.calc.template",
+	".sxd":     "application/vnd.sun.xml.draw",
+	".std":     "application/vnd.sun.xml.draw.template",
+	".sxi":     "application/vnd.sun.xml.impress",
+	".sti":     "application/vnd.sun.xml.impress.template",
+	".sxg":     "application/vnd.sun.xml.writer.global",
+	".sxm":     "application/vnd.sun.xml.math",
+	".sis":     "application/vnd.symbian.install",
+	".wbxml":   "application/vnd.wap.wbxml",
+	".wmlc":    "application/vnd.wap.wmlc",
+	".wmlsc":   "application/vnd.wap.wmlscriptc",
+	".bcpio":   "application/x-bcpio",
+	".torrent": "application/x-bittorrent",
+	".bz2":     "application/x-bzip2",
+	".vcd":     "application/x-cdlink",
+	".pgn":     "application/x-chess-pgn",
+	".cpio":    "application/x-cpio",
+	".csh":     "application/x-csh",
+	".dvi":     "application/x-dvi",
+	".spl":     "application/x-futuresplash",
+	".gtar":    "application/x-gtar",
+	".hdf":     "application/x-hdf",
+	".jar":     "application/x-java-archive",
+	".jnlp":    "application/x-java-jnlp-file",
+	".js":      "application/x-javascript",
+	".ksp":     "application/x-kspread",
+	".chrt":    "application/x-kchart",
+	".kil":     "application/x-killustrator",
+	".latex":   "application/x-latex",
+	".rpm":     "application/x-rpm",
+	".sh":      "application/x-sh",
+	".shar":    "application/x-shar",
+	".swf":     "application/x-shockwave-flash",
+	".sit":     "application/x-stuffit",
+	".sv4cpio": "application/x-sv4cpio",
+	".sv4crc":  "application/x-sv4crc",
+	".tar":     "application/x-tar",
+	".tcl":     "application/x-tcl",
+	".tex":     "application/x-tex",
+	".man":     "application/x-troff-man",
+	".me":      "application/x-troff-me",
+	".ms":      "application/x-troff-ms",
+	".ustar":   "application/x-ustar",
+	".src":     "application/x-wais-source",
+	".zip":     "application/zip",
+	".m3u":     "audio/x-mpegurl",
+	".ra":      "audio/x-pn-realaudio",
+	".wav":     "audio/x-wav",
+	".wma":     "audio/x-ms-wma",
+	".wax":     "audio/x-ms-wax",
+	".pdb":     "chemical/x-pdb",
+	".xyz":     "chemical/x-xyz",
+	".bmp":     "image/bmp",
+	".gif":     "image/gif",
+	".ief":     "image/ief",
+	".png":     "image/png",
+	".wbmp":    "image/vnd.wap.wbmp",
+	".ras":     "image/x-cmu-raster",
+	".pnm":     "image/x-portable-anymap",
+	".pbm":     "image/x-portable-bitmap",
+	".pgm":     "image/x-portable-graymap",
+	".ppm":     "image/x-portable-pixmap",
+	".rgb":     "image/x-rgb",
+	".xbm":     "image/x-xbitmap",
+	".xpm":     "image/x-xpixmap",
+	".xwd":     "image/x-xwindowdump",
+	".css":     "text/css",
+	".rtx":     "text/richtext",
+	".tsv":     "text/tab-separated-values",
+	".jad":     "text/vnd.sun.j2me.app-descriptor",
+	".wml":     "text/vnd.wap.wml",
+	".wmls":    "text/vnd.wap.wmlscript",
+	".etx":     "text/x-setext",
+	".mxu":     "video/vnd.mpegurl",
+	".flv":     "video/x-flv",
+	".wm":      "video/x-ms-wm",
+	".wmv":     "video/x-ms-wmv",
+	".wmx":     "video/x-ms-wmx",
+	".wvx":     "video/x-ms-wvx",
+	".avi":     "video/x-msvideo",
+	".movie":   "video/x-sgi-movie",
+	".ice":     "x-conference/x-cooltalk",
+	".3gp":     "video/3gpp",
+	".ai":      "application/postscript",
+	".aif":     "audio/x-aiff",
+	".aifc":    "audio/x-aiff",
+	".aiff":    "audio/x-aiff",
+	".asc":     "text/plain",
+	".atom":    "application/atom+xml",
+	".au":      "audio/basic",
+	".bin":     "application/octet-stream",
+	".cdf":     "application/x-netcdf",
+	".cgm":     "image/cgm",
+	".class":   "application/octet-stream",
+	".dcr":     "application/x-director",
+	".dif":     "video/x-dv",
+	".dir":     "application/x-director",
+	".djv":     "image/vnd.djvu",
+	".djvu":    "image/vnd.djvu",
+	".dll":     "application/octet-stream",
+	".dmg":     "application/octet-stream",
+	".dms":     "application/octet-stream",
+	".dtd":     "application/xml-dtd",
+	".dv":      "video/x-dv",
+	".dxr":     "application/x-director",
+	".eps":     "application/postscript",
+	".exe":     "application/octet-stream",
+	".ez":      "application/andrew-inset",
+	".gram":    "application/srgs",
+	".grxml":   "application/srgs+xml",
+	".gz":      "application/x-gzip",
+	".htm":     "text/html",
+	".html":    "text/html",
+	".ico":     "image/x-icon",
+	".ics":     "text/calendar",
+	".ifb":     "text/calendar",
+	".iges":    "model/iges",
+	".igs":     "model/iges",
+	".jp2":     "image/jp2",
+	".jpe":     "image/jpeg",
+	".jpeg":    "image/jpeg",
+	".jpg":     "image/jpeg",
+	".kar":     "audio/midi",
+	".lha":     "application/octet-stream",
+	".lzh":     "application/octet-stream",
+	".m4a":     "audio/mp4a-latm",
+	".m4p":     "audio/mp4a-latm",
+	".m4u":     "video/vnd.mpegurl",
+	".m4v":     "video/x-m4v",
+	".mac":     "image/x-macpaint",
+	".mathml":  "application/mathml+xml",
+	".mesh":    "model/mesh",
+	".mid":     "audio/midi",
+	".midi":    "audio/midi",
+	".mov":     "video/quicktime",
+	".mp2":     "audio/mpeg",
+	".mp3":     "audio/mpeg",
+	".mp4":     "video/mp4",
+	".mpe":     "video/mpeg",
+	".mpeg":    "video/mpeg",
+	".mpg":     "video/mpeg",
+	".mpga":    "audio/mpeg",
+	".msh":     "model/mesh",
+	".nc":      "application/x-netcdf",
+	".oda":     "application/oda",
+	".ogv":     "video/ogv",
+	".pct":     "image/pict",
+	".pic":     "image/pict",
+	".pict":    "image/pict",
+	".pnt":     "image/x-macpaint",
+	".pntg":    "image/x-macpaint",
+	".ps":      "application/postscript",
+	".qt":      "video/quicktime",
+	".qti":     "image/x-quicktime",
+	".qtif":    "image/x-quicktime",
+	".ram":     "audio/x-pn-realaudio",
+	".rdf":     "application/rdf+xml",
+	".rm":      "application/vnd.rn-realmedia",
+	".roff":    "application/x-troff",
+	".sgm":     "text/sgml",
+	".sgml":    "text/sgml",
+	".silo":    "model/mesh",
+	".skd":     "application/x-koan",
+	".skm":     "application/x-koan",
+	".skp":     "application/x-koan",
+	".skt":     "application/x-koan",
+	".smi":     "application/smil",
+	".smil":    "application/smil",
+	".snd":     "audio/basic",
+	".so":      "application/octet-stream",
+	".svg":     "image/svg+xml",
+	".t":       "application/x-troff",
+	".texi":    "application/x-texinfo",
+	".texinfo": "application/x-texinfo",
+	".tif":     "image/tiff",
+	".tiff":    "image/tiff",
+	".tr":      "application/x-troff",
+	".txt":     "text/plain",
+	".vrml":    "model/vrml",
+	".vxml":    "application/voicexml+xml",
+	".webm":    "video/webm",
+	".wrl":     "model/vrml",
+	".xht":     "application/xhtml+xml",
+	".xhtml":   "application/xhtml+xml",
+	".xml":     "application/xml",
+	".xsl":     "application/xml",
+	".xslt":    "application/xslt+xml",
+	".xul":     "application/vnd.mozilla.xul+xml",
+}
+
+// TypeByExtension returns the MIME type associated with the extension of
+// filePath. It consults the platform registry (mime.TypeByExtension) first
+// and falls back to the package's built-in table, matching the extension
+// case-insensitively. Returns "" when the extension is unknown.
+func TypeByExtension(filePath string) string {
+	ext := path.Ext(filePath) // compute the extension once for both lookups
+	typ := mime.TypeByExtension(ext)
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(ext)]
+	}
+	return typ
+}

+ 60 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go

@@ -0,0 +1,60 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response wraps an HTTP response from OSS together with the client- and
+// server-side CRC64 values used for integrity checking.
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+// PutObjectRequest is the request of DoPutObject.
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject.
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject.
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject.
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult is the result of DoAppendObject.
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart.
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart.
+type UploadPartResult struct {
+	Part UploadPart
+}

+ 414 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go

@@ -0,0 +1,414 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
+//
+// CopyFile copies an object in parts (multipart copy).
+//
+// srcBucketName  source bucket name.
+// srcObjectKey   source object key.
+// destObjectKey  destination object key; the destination bucket is Bucket.BucketName.
+// partSize       size of each copied part in bytes, e.g. 100*1024 for 100KB parts.
+// options        object attributes for the destination; see InitiateMultipartUpload.
+//
+// error          nil on success, otherwise the error that occurred.
+//
+func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
+	destBucketName := bucket.BucketName
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range (1024KB, 5GB]")
+	}
+
+	// Resolve the checkpoint config; the default checkpoint file name is
+	// derived from the destination object name.
+	cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
+	if err != nil {
+		return err
+	}
+
+	routines := getRoutines(options)
+
+	if cpConf.IsEnable {
+		return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, 
+			partSize, options, cpConf.FilePath, routines)
+	}
+
+	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey, 
+		partSize, options, routines)
+}
+
+// ----- concurrent copy without checkpoint -----
+
+// copyWorkerArg bundles everything a copy worker goroutine needs.
+type copyWorkerArg struct {
+	bucket         *Bucket
+	imur           InitiateMultipartUploadResult
+	srcBucketName  string  
+	srcObjectKey   string  
+	options        []Option
+	hook           copyPartHook
+}
+
+// copyPartHook is a test seam invoked before each part is copied.
+type copyPartHook func(part copyPart) error
+
+// copyPartHooker is the active hook; tests swap it in to inject failures.
+var copyPartHooker copyPartHook = defaultCopyPartHook
+
+// defaultCopyPartHook does nothing and lets the copy proceed.
+func defaultCopyPartHook(part copyPart) error {
+	return nil
+}
+
+// copyWorker runs as a goroutine: it takes parts from jobs, copies each via
+// UploadPartCopy, and reports results on results or the first error on
+// failed. A closed die channel tells the worker to stop without sending
+// further results.
+func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <- chan bool) {	
+	for chunk := range jobs {
+		if err := arg.hook(chunk); err != nil {
+			failed <- err
+			break
+		}
+		chunkSize := chunk.End - chunk.Start + 1
+		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey, 
+			chunk.Start, chunkSize, chunk.Number, arg.options...)
+		if err != nil {
+			failed <- err
+			break
+		}
+		// Drop the result if the coordinator has already aborted.
+		select {
+			case <-die:
+				return
+			default:
+		}
+		results <- part
+	}
+}
+
+// copyScheduler feeds every part into the jobs channel, then closes it so
+// the workers can drain the queue and exit.
+func copyScheduler(jobs chan copyPart, parts []copyPart) {
+	for i := range parts {
+		jobs <- parts[i]
+	}
+	close(jobs)
+}
+
+// copyPart describes one part of a multipart copy.
+type copyPart struct {
+	Number int  // part number, in [1, 10000]
+	Start int64 // offset of the first byte of the part
+	End   int64 // offset of the last byte of the part (inclusive)
+}
+
+// getCopyParts splits the source object into partSize-sized copy parts,
+// based on its Content-Length fetched via GetObjectDetailedMeta.
+func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
+	meta, err := bucket.GetObjectDetailedMeta(objectKey)
+	if err != nil {
+		return nil, err
+	}
+
+	parts := []copyPart{}
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	part := copyPart{}
+	i := 0
+	for offset := int64(0); offset < objectSize; offset += partSize {
+		part.Number = i + 1
+		part.Start = offset
+		part.End = GetPartEnd(offset, objectSize, partSize)
+		parts = append(parts, part)
+		i++
+	}
+	return parts, nil
+}
+
+// copyFile performs a concurrent multipart copy without checkpoint/resume
+// support. On any part failure the multipart upload is aborted and the
+// first error is returned.
+func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+	partSize int64, options []Option, routines int) error {
+	// Bug fix: errors from Client.Bucket were previously discarded
+	// (overwritten by the next assignment), risking use of a bad handle.
+	descBucket, err := bucket.Client.Bucket(destBucketName)
+	if err != nil {
+		return err
+	}
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return err
+	}
+
+	// Split the source object into parts.
+	parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
+	if err != nil {
+		return err
+	}
+
+	// Initiate the multipart upload on the destination.
+	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan copyPart, len(parts))
+	results := make(chan UploadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	// Start the worker goroutines.
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+	for w := 1; w <= routines; w++ {
+		go copyWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Feed the parts to the workers.
+	go copyScheduler(jobs, parts)
+
+	// Wait until every part is copied, failing fast on the first error.
+	completed := 0
+	ups := make([]UploadPart, len(parts))
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			ups[part.PartNumber-1] = part
+		case err := <-failed:
+			close(die)
+			descBucket.AbortMultipartUpload(imur)
+			return err
+		}
+	}
+
+	// Commit the copied parts.
+	if _, err = descBucket.CompleteMultipartUpload(imur, ups); err != nil {
+		bucket.AbortMultipartUpload(imur)
+		return err
+	}
+	return nil
+}
+
+// ----- concurrent copy with checkpoint (resumable) -----
+
+const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
+
+// copyCheckpoint is the state persisted to the checkpoint file so that an
+// interrupted multipart copy can be resumed.
+type copyCheckpoint struct {
+	Magic    string          // magic marker identifying a copy checkpoint
+	MD5      string          // MD5 of the checkpoint content, for integrity
+	SrcBucketName  string    // source bucket
+	SrcObjectKey   string    // source object
+	DestBucketName string    // destination bucket
+	DestObjectKey  string    // destination object
+	CopyID         string    // ID of the multipart upload backing the copy
+	ObjStat   objectStat     // source object stat (size/mtime/etag)
+	Parts     []copyPart     // all parts of the copy
+	CopyParts []UploadPart   // results of successfully copied parts
+	PartStat  []bool         // whether each part has completed
+}
+
+// isValid reports whether the loaded checkpoint can be trusted: its magic
+// and MD5 must match, and the source object must not have changed since the
+// checkpoint was written.
+func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
+	// Recompute the MD5 over the checkpoint with its MD5 field cleared and
+	// compare it, together with the magic, against the stored values.
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	// Bug fix: compare against copyCpMagic (this is a copy checkpoint),
+	// not downloadCpMagic — otherwise a valid copy checkpoint was always
+	// rejected and the copy restarted from scratch.
+	if cp.Magic != copyCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	// Verify the source object has not been updated in the meantime.
+	meta, err := bucket.GetObjectDetailedMeta(objectKey)
+	if err != nil {
+		return false, err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare size / last-modified / etag with the recorded stat.
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// load reads the checkpoint file at filePath and unmarshals it into cp.
+func (cp *copyCheckpoint) load(filePath string) error {
+	data, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, cp)
+}
+
+// update records a successfully copied part and marks it done in PartStat.
+// (Reformatted to be gofmt-clean: no spaces around '-' in index expressions,
+// no trailing whitespace.)
+func (cp *copyCheckpoint) update(part UploadPart) {
+	cp.CopyParts[part.PartNumber-1] = part
+	cp.PartStat[part.PartNumber-1] = true
+}
+
+// dump serializes the checkpoint to JSON and writes it to filePath. The MD5
+// field is set to the digest of the JSON with MD5 cleared, so load/isValid
+// can detect a corrupted checkpoint file.
+func (cp *copyCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Compute the MD5 over the content with the MD5 field cleared.
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialize again, now including the MD5.
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Write the checkpoint file.
+	return ioutil.WriteFile(filePath, js, 0644)
+}
+
+// todoParts returns the parts that have not been copied yet.
+func (cp copyCheckpoint) todoParts() []copyPart {
+	remaining := []copyPart{}
+	for idx, done := range cp.PartStat {
+		if !done {
+			remaining = append(remaining, cp.Parts[idx])
+		}
+	}
+	return remaining
+}
+
+// prepare initializes a fresh checkpoint for a copy: it records source and
+// destination, stats the source object, splits it into parts, and initiates
+// the multipart upload whose ID is stored in CopyID.
+func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string, 
+	partSize int64, options []Option) error {
+	// checkpoint identity
+	cp.Magic = copyCpMagic
+	cp.SrcBucketName = srcBucket.BucketName
+	cp.SrcObjectKey = srcObjectKey
+	cp.DestBucketName = destBucket.BucketName
+	cp.DestObjectKey = destObjectKey
+
+	// source object stat
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	// split into parts; no part is done yet
+	cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
+	if err != nil {
+		return err
+	}
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+	}
+	cp.CopyParts = make([]UploadPart, len(cp.Parts))
+	
+	// initiate the multipart upload backing the copy
+	imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
+	if err != nil {
+		return err
+	}
+	cp.CopyID = imur.UploadID
+
+	return nil
+}
+
+// complete commits all copied parts via CompleteMultipartUpload and, on
+// success, removes the checkpoint file.
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+	imur := InitiateMultipartUploadResult{
+		Bucket:   cp.DestBucketName,
+		Key:      cp.DestObjectKey,
+		UploadID: cp.CopyID}
+	if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
+		return err
+	}
+	os.Remove(cpFilePath)
+	return nil
+}
+
+// copyFileWithCp performs a concurrent multipart copy with checkpoint/resume
+// support: progress is persisted to cpFilePath after every part so an
+// interrupted copy can continue where it left off.
+func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+	partSize int64, options []Option, cpFilePath string, routines int) error {
+	// Bug fix: errors from Client.Bucket were previously discarded
+	// (overwritten by later assignments), risking use of a bad handle.
+	descBucket, err := bucket.Client.Bucket(destBucketName)
+	if err != nil {
+		return err
+	}
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return err
+	}
+
+	// Load the checkpoint data; a broken file is discarded.
+	ccp := copyCheckpoint{}
+	err = ccp.load(cpFilePath)
+	if err != nil {
+		os.Remove(cpFilePath)
+	}
+
+	// When loading failed or the checkpoint is stale, start a fresh copy.
+	valid, err := ccp.isValid(srcBucket, srcObjectKey)
+	if err != nil || !valid {
+		if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+			return err
+		}
+		os.Remove(cpFilePath)
+	}
+
+	// Parts still to be copied.
+	parts := ccp.todoParts()
+	imur := InitiateMultipartUploadResult{
+		Bucket:   destBucketName,
+		Key:      destObjectKey,
+		UploadID: ccp.CopyID}
+
+	jobs := make(chan copyPart, len(parts))
+	results := make(chan UploadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	// Start the worker goroutines.
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+	for w := 1; w <= routines; w++ {
+		go copyWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Feed the parts to the workers.
+	go copyScheduler(jobs, parts)
+
+	// Wait for the remaining parts, checkpointing after each one. The upload
+	// is deliberately not aborted on failure so the copy can be resumed.
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			ccp.update(part)
+			ccp.dump(cpFilePath)
+		case err := <-failed:
+			close(die)
+			return err
+		}
+	}
+
+	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
+}

+ 468 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy_test.go

@@ -0,0 +1,468 @@
+package oss
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	. "gopkg.in/check.v1"
+)
+
+// OssCopySuite groups the multipart-copy tests; it holds the shared client
+// and the bucket the tests operate on.
+type OssCopySuite struct {
+	client *Client
+	bucket *Bucket
+}
+
+// Register the suite with gocheck.
+var _ = Suite(&OssCopySuite{})
+
+// SetUpSuite runs once before any test in the suite: it creates the client,
+// creates the test bucket (error deliberately ignored — presumably the
+// bucket may already exist; TODO confirm), waits briefly, and opens the
+// bucket handle.
+func (s *OssCopySuite) SetUpSuite(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+	s.client = client
+
+	s.client.CreateBucket(bucketName)
+	time.Sleep(5 * time.Second)
+
+	bucket, err := s.client.Bucket(bucketName)
+	c.Assert(err, IsNil)
+	s.bucket = bucket
+
+	testLogger.Println("test copy started")
+}
+
+// TearDownSuite runs once after all tests in the suite have finished (the
+// original comment wrongly said "before each test"): it aborts any leftover
+// multipart uploads and deletes every object in the test bucket.
+func (s *OssCopySuite) TearDownSuite(c *C) {
+	// Abort in-progress multipart uploads.
+	lmur, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+
+	for _, upload := range lmur.Uploads {
+		var imur = InitiateMultipartUploadResult{Bucket: bucketName,
+			Key: upload.Key, UploadID: upload.UploadID}
+		err = s.bucket.AbortMultipartUpload(imur)
+		c.Assert(err, IsNil)
+	}
+
+	// Delete the remaining objects.
+	lor, err := s.bucket.ListObjects()
+	c.Assert(err, IsNil)
+
+	for _, object := range lor.Objects {
+		err = s.bucket.DeleteObject(object.Key)
+		c.Assert(err, IsNil)
+	}
+
+	testLogger.Println("test copy completed")
+}
+
+// SetUpTest runs before each test (the original comment wrongly said
+// "after"): it removes temporary .jpg files from the working directory.
+func (s *OssCopySuite) SetUpTest(c *C) {
+	err := removeTempFiles("../oss", ".jpg")
+	c.Assert(err, IsNil)
+}
+
+// TearDownTest runs after each test (the original comment wrongly said
+// "once after all tests"): it removes temporary .jpg files created by the
+// test.
+func (s *OssCopySuite) TearDownTest(c *C) {
+	err := removeTempFiles("../oss", ".jpg")
+	c.Assert(err, IsNil)
+}
+
+// TestCopyRoutineWithoutRecovery covers multi-routine copy without
+// checkpoint recovery, across several routine counts and with an option.
+func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
+	srcObjectName := objectNamePrefix + "tcrwr"
+	destObjectName := srcObjectName + "-copy"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "copy-new-file.jpg"
+
+	// Upload the source object.
+	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// No Routines option: defaults to a single routine.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024)
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// Explicit single routine.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(1))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// Three routines, fewer than the five parts.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(3))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// Five routines, equal to the number of parts.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(5))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// Ten routines, more than the five parts.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(10))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// An invalid routine count falls back to one routine.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(-1))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// Option passthrough: the destination object gets the custom meta.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(3), Meta("myprop", "mypropval"))
+	// Bug fix: this error was previously never asserted.
+	c.Assert(err, IsNil)
+
+	meta, err := s.bucket.GetObjectDetailedMeta(destObjectName)
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	err = s.bucket.DeleteObject(srcObjectName)
+	c.Assert(err, IsNil)
+}
+
+// CopyErrorHooker is a copy-part hook that fails on part number 5, used by
+// the tests to simulate a worker error.
+func CopyErrorHooker(part copyPart) error {
+	if part.Number != 5 {
+		return nil
+	}
+	time.Sleep(time.Second)
+	return fmt.Errorf("ErrorHooker")
+}
+
+// TestCopyRoutineWithoutRecoveryNegative covers the error paths of the
+// multi-routine copy without checkpoint recovery.
+func (s *OssCopySuite) TestCopyRoutineWithoutRecoveryNegative(c *C) {
+	srcObjectName := objectNamePrefix + "tcrwrn"
+	destObjectName := srcObjectName + "-copy"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+
+	// Upload the source object.
+	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
+	c.Assert(err, IsNil)
+
+	copyPartHooker = CopyErrorHooker
+	// Worker routine failure, injected through the copy-part hook.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(2))
+
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	copyPartHooker = defaultCopyPartHook
+
+	// Source bucket does not exist.
+	err = s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Routines(2))
+	c.Assert(err, NotNil)
+
+	// Source object does not exist.
+	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2))
+	// Bug fix: this error was previously never asserted.
+	c.Assert(err, NotNil)
+
+	// Invalid part sizes.
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Routines(2))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2))
+	c.Assert(err, NotNil)
+
+	// Clean up the source object.
+	err = s.bucket.DeleteObject(srcObjectName)
+	c.Assert(err, IsNil)
+}
+
+// TestCopyRoutineWithRecovery 多线程且有断点恢复的复制
+func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
+	srcObjectName := objectNamePrefix + "tcrtr"
+	destObjectName := srcObjectName + "-copy"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "copy-new-file.jpg"
+
+	// 上传源文件
+	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// Routines默认值,CP开启默认路径是destObjectName+.cp
+	// 第一次上传,上传4片
+	copyPartHooker = CopyErrorHooker
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	copyPartHooker = defaultCopyPartHook
+
+	// check cp
+	ccp := copyCheckpoint{}
+	err = ccp.load(destObjectName + ".cp")
+	c.Assert(err, IsNil)
+	c.Assert(ccp.Magic, Equals, copyCpMagic)
+	c.Assert(len(ccp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
+	c.Assert(ccp.SrcBucketName, Equals, bucketName)
+	c.Assert(ccp.SrcObjectKey, Equals, srcObjectName)
+	c.Assert(ccp.DestBucketName, Equals, bucketName)
+	c.Assert(ccp.DestObjectKey, Equals, destObjectName)
+	c.Assert(len(ccp.CopyID), Equals, len("3F79722737D1469980DACEDCA325BB52"))
+	c.Assert(ccp.ObjStat.Size, Equals, int64(482048))
+	c.Assert(len(ccp.ObjStat.LastModified), Equals, len("2015-12-17 18:43:03 +0800 CST"))
+	c.Assert(ccp.ObjStat.Etag, Equals, "\"2351E662233817A7AE974D8C5B0876DD-5\"")
+	c.Assert(len(ccp.Parts), Equals, 5)
+	c.Assert(len(ccp.todoParts()), Equals, 1)
+	c.Assert(ccp.PartStat[4], Equals, false)
+
+	// 第二次上传,完成剩余的一片
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	err = ccp.load(fileName + ".cp")
+	c.Assert(err, NotNil)
+
+	// Routines指定,CP指定
+	copyPartHooker = CopyErrorHooker
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	copyPartHooker = defaultCopyPartHook
+
+	// check cp
+	ccp = copyCheckpoint{}
+	err = ccp.load(srcObjectName + ".cp")
+	c.Assert(err, IsNil)
+	c.Assert(ccp.Magic, Equals, copyCpMagic)
+	c.Assert(len(ccp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
+	c.Assert(ccp.SrcBucketName, Equals, bucketName)
+	c.Assert(ccp.SrcObjectKey, Equals, srcObjectName)
+	c.Assert(ccp.DestBucketName, Equals, bucketName)
+	c.Assert(ccp.DestObjectKey, Equals, destObjectName)
+	c.Assert(len(ccp.CopyID), Equals, len("3F79722737D1469980DACEDCA325BB52"))
+	c.Assert(ccp.ObjStat.Size, Equals, int64(482048))
+	c.Assert(len(ccp.ObjStat.LastModified), Equals, len("2015-12-17 18:43:03 +0800 CST"))
+	c.Assert(ccp.ObjStat.Etag, Equals, "\"2351E662233817A7AE974D8C5B0876DD-5\"")
+	c.Assert(len(ccp.Parts), Equals, 5)
+	c.Assert(len(ccp.todoParts()), Equals, 1)
+	c.Assert(ccp.PartStat[4], Equals, false)
+
+	// 第二次上传,完成剩余的一片
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	err = ccp.load(srcObjectName + ".cp")
+	c.Assert(err, NotNil)
+
+	// 一次完成上传,中间没有错误
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// 用多协程下载,中间没有错误
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// option
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""), Meta("myprop", "mypropval"))
+	c.Assert(err, IsNil)
+
+	meta, err := s.bucket.GetObjectDetailedMeta(destObjectName)
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
+
+	err = s.bucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// 删除源文件
+	err = s.bucket.DeleteObject(srcObjectName)
+	c.Assert(err, IsNil)
+}
+
+// TestCopyRoutineWithRecoveryNegative 多线程无断点恢复的复制
+func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
+	srcObjectName := objectNamePrefix + "tcrwrn"
+	destObjectName := srcObjectName + "-copy"
+
+	// 源Bucket不存在
+	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err, NotNil)
+
+	// 源Object不存在
+	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	// 指定的分片大小无效
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+}
+
+// TestCopyFileCrossBucket 跨Bucket直接的复制
+func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
+	destBucketName := bucketName + "-cfcb-desc"
+	srcObjectName := objectNamePrefix + "tcrtr"
+	destObjectName := srcObjectName + "-copy"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "copy-new-file.jpg"
+
+	destBucket, err := s.client.Bucket(destBucketName)
+	c.Assert(err, IsNil)
+
+	// 创建目标Bucket
+	err = s.client.CreateBucket(destBucketName)
+
+	// 上传源文件
+	err = s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// 复制文件
+	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	err = destBucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = destBucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// 带option的复制
+	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, "copy.cp"), Meta("myprop", "mypropval"))
+	c.Assert(err, IsNil)
+
+	err = destBucket.GetObjectToFile(destObjectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = destBucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// 删除目标Bucket
+	err = s.client.DeleteBucket(destBucketName)
+	c.Assert(err, IsNil)
+}

+ 280 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go

@@ -0,0 +1,280 @@
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"os"
+	"sort"
+	"strconv"
+)
+
+//
+// InitiateMultipartUpload 初始化分片上传任务。
+//
+// objectKey  Object名称。
+// options    上传时可以指定Object的属性,可选属性有CacheControl、ContentDisposition、ContentEncoding、Expires、
+// ServerSideEncryption、Meta,具体含义请参考
+// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
+//
+// InitiateMultipartUploadResult 初始化后操作成功的返回值,用于后面的UploadPartFromFile、UploadPartCopy等操作。error为nil时有效。
+// error  操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
+	var imur InitiateMultipartUploadResult
+	opts := addContentType(options, objectKey)
+	resp, err := bucket.do("POST", objectKey, "uploads", "uploads", opts, nil)
+	if err != nil {
+		return imur, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &imur)
+	return imur, err
+}
+
+//
+// UploadPart 上传分片。
+//
+// 初始化一个Multipart Upload之后,可以根据指定的Object名和Upload ID来分片(Part)上传数据。
+// 每一个上传的Part都有一个标识它的号码(part number,范围是1~10000)。对于同一个Upload ID,
+// 该号码不但唯一标识这一片数据,也标识了这片数据在整个文件内的相对位置。如果您用同一个part号码,上传了新的数据,
+// 那么OSS上已有的这个号码的Part数据将被覆盖。除了最后一片Part以外,其他的part最小为100KB;
+// 最后一片Part没有大小限制。
+//
+// imur        InitiateMultipartUpload成功后的返回值。
+// reader      io.Reader 需要分片上传的reader。
+// size        本次上传片Part的大小。
+// partNumber  本次上传片(Part)的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。
+//
+// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,即传入参数partNumber;
+// ETag及上传数据的MD5。error为nil时有效。
+// error 操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
+	partSize int64, partNumber int) (UploadPart, error) {
+	request := &UploadPartRequest{
+		InitResult: &imur,
+		Reader:     reader,
+		PartSize:   partSize,
+		PartNumber: partNumber,
+	}
+
+	result, err := bucket.DoUploadPart(request)
+
+	return result.Part, err
+}
+
+//
+// UploadPartFromFile 上传分片。
+//
+// imur           InitiateMultipartUpload成功后的返回值。
+// filePath       需要分片上传的本地文件。
+// startPosition  本次上传文件片的起始位置。
+// partSize       本次上传文件片的大小。
+// partNumber     本次上传文件片的编号,范围是1~10000。
+//
+// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,传入参数partNumber;
+// ETag上传数据的MD5。error为nil时有效。
+// error 操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
+	startPosition, partSize int64, partNumber int) (UploadPart, error) {
+	var part = UploadPart{}
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return part, err
+	}
+	defer fd.Close()
+	fd.Seek(startPosition, os.SEEK_SET)
+
+	request := &UploadPartRequest{
+		InitResult: &imur,
+		Reader:     fd,
+		PartSize:   partSize,
+		PartNumber: partNumber,
+	}
+
+	result, err := bucket.DoUploadPart(request)
+
+	return result.Part, err
+}
+
+//
+// DoUploadPart 上传分片。
+//
+// request 上传分片请求。
+//
+// UploadPartResult 上传分片请求返回值。
+// error  操作无错误为nil,非nil为错误信息。
+//
+func (bucket Bucket) DoUploadPart(request *UploadPartRequest) (*UploadPartResult, error) {
+	params := "partNumber=" + strconv.Itoa(request.PartNumber) + "&uploadId=" + request.InitResult.UploadID
+	opts := []Option{ContentLength(request.PartSize)}
+	resp, err := bucket.do("PUT", request.InitResult.Key, params, params, opts,
+		&io.LimitedReader{R: request.Reader, N: request.PartSize})
+	if err != nil {
+		return &UploadPartResult{}, err
+	}
+	defer resp.Body.Close()
+
+	part := UploadPart{
+		ETag:       resp.Headers.Get(HTTPHeaderEtag),
+		PartNumber: request.PartNumber,
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoUploadPart")
+		if err != nil {
+			return &UploadPartResult{part}, err
+		}
+	}
+
+	return &UploadPartResult{part}, nil
+}
+
+//
+// UploadPartCopy 拷贝分片。
+//
+// imur           InitiateMultipartUpload成功后的返回值。
+// copySrc        源Object名称。
+// startPosition  本次拷贝片(Part)在源Object的起始位置。
+// partSize       本次拷贝片的大小。
+// partNumber     本次拷贝片的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。
+// options        copy时源Object的限制条件,满足限制条件时copy,不满足时返回错误。可选条件有CopySourceIfMatch、
+// CopySourceIfNoneMatch、CopySourceIfModifiedSince  CopySourceIfUnmodifiedSince,具体含义请参看
+// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
+//
+// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片(Part)编号,即传入参数partNumber;
+// ETag及上传数据的MD5。error为nil时有效。
+// error 操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
+	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+	var out UploadPartCopyResult
+	var part UploadPart
+
+	opts := []Option{CopySource(srcBucketName, srcObjectKey),
+		CopySourceRange(startPosition, partSize)}
+	opts = append(opts, options...)
+	params := "partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + imur.UploadID
+	resp, err := bucket.do("PUT", imur.Key, params, params, opts, nil)
+	if err != nil {
+		return part, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return part, err
+	}
+	part.ETag = out.ETag
+	part.PartNumber = partNumber
+
+	return part, nil
+}
+
+//
+// CompleteMultipartUpload 提交分片上传任务。
+//
+// imur   InitiateMultipartUpload的返回值。
+// parts  UploadPart/UploadPartFromFile/UploadPartCopy返回值组成的数组。
+//
+// CompleteMultipartUploadResponse  操作成功后的返回值。error为nil时有效。
+// error  操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
+	parts []UploadPart) (CompleteMultipartUploadResult, error) {
+	var out CompleteMultipartUploadResult
+
+	sort.Sort(uploadParts(parts))
+	cxml := completeMultipartUploadXML{}
+	cxml.Part = parts
+	bs, err := xml.Marshal(cxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	params := "uploadId=" + imur.UploadID
+	resp, err := bucket.do("POST", imur.Key, params, params, nil, buffer)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// AbortMultipartUpload 取消分片上传任务。
+//
+// imur  InitiateMultipartUpload的返回值。
+//
+// error  操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
+	params := "uploadId=" + imur.UploadID
+	resp, err := bucket.do("DELETE", imur.Key, params, params, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// ListUploadedParts 列出指定上传任务已经上传的分片。
+//
+// imur  InitiateMultipartUpload的返回值。
+//
+// ListUploadedPartsResponse  操作成功后的返回值,成员UploadedParts已经上传/拷贝的片。error为nil时该返回值有效。
+// error  操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
+	var out ListUploadedPartsResult
+	params := "uploadId=" + imur.UploadID
+	resp, err := bucket.do("GET", imur.Key, params, params, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// ListMultipartUploads 列出所有未上传完整的multipart任务列表。
+//
+// options  ListObject的筛选行为。Prefix返回object的前缀,KeyMarker返回object的起始位置,MaxUploads最大数目默认1000,
+// Delimiter用于对Object名字进行分组的字符,所有名字包含指定的前缀且第一次出现delimiter字符之间的object。
+//
+// ListMultipartUploadResponse  操作成功后的返回值,error为nil时该返回值有效。
+// error  操作成功error为nil,非nil为错误信息。
+//
+func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
+	var out ListMultipartUploadResult
+
+	options = append(options, EncodingType("url"))
+	params, err := handleParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", "uploads&"+params, "uploads", nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+	err = decodeListMultipartUploadResult(&out)
+	return out, err
+}

+ 946 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart_test.go

@@ -0,0 +1,946 @@
+// multipart test
+
+package oss
+
+import (
+	"math/rand"
+	"net/http"
+	"os"
+	"strconv"
+	"time"
+
+	. "gopkg.in/check.v1"
+)
+
+type OssBucketMultipartSuite struct {
+	client *Client
+	bucket *Bucket
+}
+
+var _ = Suite(&OssBucketMultipartSuite{})
+
+// Run once when the suite starts running
+func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+	s.client = client
+
+	s.client.CreateBucket(bucketName)
+	time.Sleep(5 * time.Second)
+
+	bucket, err := s.client.Bucket(bucketName)
+	c.Assert(err, IsNil)
+	s.bucket = bucket
+
+	// Delete Part
+	lmur, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+
+	for _, upload := range lmur.Uploads {
+		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+			Key: upload.Key, UploadID: upload.UploadID}
+		err = s.bucket.AbortMultipartUpload(imur)
+		c.Assert(err, IsNil)
+	}
+
+	// Delete Objects
+	lor, err := s.bucket.ListObjects()
+	c.Assert(err, IsNil)
+
+	for _, object := range lor.Objects {
+		err = s.bucket.DeleteObject(object.Key)
+		c.Assert(err, IsNil)
+	}
+
+	testLogger.Println("test multipart started")
+}
+
+// Run before each test or benchmark starts running
+func (s *OssBucketMultipartSuite) TearDownSuite(c *C) {
+	// Delete Part
+	lmur, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+
+	for _, upload := range lmur.Uploads {
+		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+			Key: upload.Key, UploadID: upload.UploadID}
+		err = s.bucket.AbortMultipartUpload(imur)
+		c.Assert(err, IsNil)
+	}
+
+	// Delete Objects
+	lor, err := s.bucket.ListObjects()
+	c.Assert(err, IsNil)
+
+	for _, object := range lor.Objects {
+		err = s.bucket.DeleteObject(object.Key)
+		c.Assert(err, IsNil)
+	}
+
+	testLogger.Println("test multipart completed")
+}
+
+// Run after each test or benchmark runs
+func (s *OssBucketMultipartSuite) SetUpTest(c *C) {
+	err := removeTempFiles("../oss", ".jpg")
+	c.Assert(err, IsNil)
+}
+
+// Run once after all tests or benchmarks have finished running
+func (s *OssBucketMultipartSuite) TearDownTest(c *C) {
+	err := removeTempFiles("../oss", ".jpg")
+	c.Assert(err, IsNil)
+
+	err = removeTempFiles("../oss", ".txt1")
+	c.Assert(err, IsNil)
+
+	err = removeTempFiles("../oss", ".txt2")
+	c.Assert(err, IsNil)
+}
+
+// TestMultipartUpload
+func (s *OssBucketMultipartSuite) TestMultipartUpload(c *C) {
+	objectName := objectNamePrefix + "tmu"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartNum(fileName, 3)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	options := []Option{
+		Expires(futureDate), Meta("my", "myprop"),
+	}
+
+	fd, err := os.Open(fileName)
+	c.Assert(err, IsNil)
+	defer fd.Close()
+
+	imur, err := s.bucket.InitiateMultipartUpload(objectName, options...)
+	c.Assert(err, IsNil)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		fd.Seek(chunk.Offset, os.SEEK_SET)
+		part, err := s.bucket.UploadPart(imur, fd, chunk.Size, chunk.Number)
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
+	c.Assert(meta.Get("Expires"), Equals, futureDate.Format(http.TimeFormat))
+	c.Assert(meta.Get("X-Oss-Object-Type"), Equals, "Multipart")
+
+	err = s.bucket.GetObjectToFile(objectName, "newpic1.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestMultipartUpload
+func (s *OssBucketMultipartSuite) TestMultipartUploadFromFile(c *C) {
+	objectName := objectNamePrefix + "tmuff"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartNum(fileName, 3)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	options := []Option{
+		Expires(futureDate), Meta("my", "myprop"),
+	}
+	imur, err := s.bucket.InitiateMultipartUpload(objectName, options...)
+	c.Assert(err, IsNil)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, chunk.Number)
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
+	c.Assert(meta.Get("Expires"), Equals, futureDate.Format(http.TimeFormat))
+	c.Assert(meta.Get("X-Oss-Object-Type"), Equals, "Multipart")
+
+	err = s.bucket.GetObjectToFile(objectName, "newpic1.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestUploadPartCopy
+func (s *OssBucketMultipartSuite) TestUploadPartCopy(c *C) {
+	objectSrc := objectNamePrefix + "tupc" + "src"
+	objectDesc := objectNamePrefix + "tupc" + "desc"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartNum(fileName, 3)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
+	c.Assert(err, IsNil)
+
+	options := []Option{
+		Expires(futureDate), Meta("my", "myprop"),
+	}
+	imur, err := s.bucket.InitiateMultipartUpload(objectDesc, options...)
+	c.Assert(err, IsNil)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectDesc)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
+	c.Assert(meta.Get("Expires"), Equals, futureDate.Format(http.TimeFormat))
+	c.Assert(meta.Get("X-Oss-Object-Type"), Equals, "Multipart")
+
+	err = s.bucket.GetObjectToFile(objectDesc, "newpic2.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectSrc)
+	c.Assert(err, IsNil)
+	err = s.bucket.DeleteObject(objectDesc)
+	c.Assert(err, IsNil)
+}
+
+func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
+	objectName := objectNamePrefix + "tlup"
+	objectSrc := objectNamePrefix + "tlup" + "src"
+	objectDesc := objectNamePrefix + "tlup" + "desc"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartSize(fileName, 100*1024)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
+	c.Assert(err, IsNil)
+
+	// upload
+	imurUpload, err := s.bucket.InitiateMultipartUpload(objectName)
+	var partsUpload []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartFromFile(imurUpload, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		partsUpload = append(partsUpload, part)
+	}
+
+	// copy
+	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	var partsCopy []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartCopy(imurCopy, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		partsCopy = append(partsCopy, part)
+	}
+
+	// list
+	lupr, err := s.bucket.ListUploadedParts(imurUpload)
+	c.Assert(err, IsNil)
+	testLogger.Println("lupr:", lupr)
+	c.Assert(len(lupr.UploadedParts), Equals, len(chunks))
+
+	lupr, err = s.bucket.ListUploadedParts(imurCopy)
+	c.Assert(err, IsNil)
+	testLogger.Println("lupr:", lupr)
+	c.Assert(len(lupr.UploadedParts), Equals, len(chunks))
+
+	lmur, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+	testLogger.Println("lmur:", lmur)
+
+	// complete
+	_, err = s.bucket.CompleteMultipartUpload(imurUpload, partsUpload)
+	c.Assert(err, IsNil)
+	_, err = s.bucket.CompleteMultipartUpload(imurCopy, partsCopy)
+	c.Assert(err, IsNil)
+
+	// download
+	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
+	c.Assert(err, IsNil)
+	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+	err = s.bucket.DeleteObject(objectDesc)
+	c.Assert(err, IsNil)
+	err = s.bucket.DeleteObject(objectSrc)
+	c.Assert(err, IsNil)
+}
+
+func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
+	objectName := objectNamePrefix + "tamu"
+	objectSrc := objectNamePrefix + "tamu" + "src"
+	objectDesc := objectNamePrefix + "tamu" + "desc"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartSize(fileName, 100*1024)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
+	c.Assert(err, IsNil)
+
+	// upload
+	imurUpload, err := s.bucket.InitiateMultipartUpload(objectName)
+	var partsUpload []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartFromFile(imurUpload, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		partsUpload = append(partsUpload, part)
+	}
+
+	// copy
+	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	var partsCopy []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartCopy(imurCopy, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		partsCopy = append(partsCopy, part)
+	}
+
+	// list
+	lupr, err := s.bucket.ListUploadedParts(imurUpload)
+	c.Assert(err, IsNil)
+	testLogger.Println("lupr:", lupr)
+	c.Assert(len(lupr.UploadedParts), Equals, len(chunks))
+
+	lupr, err = s.bucket.ListUploadedParts(imurCopy)
+	c.Assert(err, IsNil)
+	testLogger.Println("lupr:", lupr)
+	c.Assert(len(lupr.UploadedParts), Equals, len(chunks))
+
+	lmur, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+	testLogger.Println("lmur:", lmur)
+	c.Assert(len(lmur.Uploads), Equals, 2)
+
+	// abort
+	err = s.bucket.AbortMultipartUpload(imurUpload)
+	c.Assert(err, IsNil)
+	err = s.bucket.AbortMultipartUpload(imurCopy)
+	c.Assert(err, IsNil)
+
+	lmur, err = s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+	testLogger.Println("lmur:", lmur)
+	c.Assert(len(lmur.Uploads), Equals, 0)
+
+	// download
+	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
+	c.Assert(err, NotNil)
+	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
+	c.Assert(err, NotNil)
+}
+
+// TestUploadPartCopyWithConstraints
+func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
+	objectSrc := objectNamePrefix + "tucwc" + "src"
+	objectDesc := objectNamePrefix + "tucwc" + "desc"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartNum(fileName, 3)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
+	c.Assert(err, IsNil)
+
+	imur, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		_, err = s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number),
+			CopySourceIfModifiedSince(futureDate))
+		c.Assert(err, NotNil)
+	}
+
+	for _, chunk := range chunks {
+		_, err = s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number),
+			CopySourceIfUnmodifiedSince(futureDate))
+		c.Assert(err, IsNil)
+	}
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectSrc)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+
+	for _, chunk := range chunks {
+		_, err = s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number),
+			CopySourceIfNoneMatch(meta.Get("Etag")))
+		c.Assert(err, NotNil)
+	}
+
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number),
+			CopySourceIfMatch(meta.Get("Etag")))
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	err = s.bucket.GetObjectToFile(objectDesc, "newpic5.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectSrc)
+	c.Assert(err, IsNil)
+	err = s.bucket.DeleteObject(objectDesc)
+	c.Assert(err, IsNil)
+}
+
+// TestMultipartUploadFromFileOutofOrder
+func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileOutofOrder(c *C) {
+	objectName := objectNamePrefix + "tmuffoo"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartSize(fileName, 1024*100)
+	shuffleArray(chunks)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	imur, err := s.bucket.InitiateMultipartUpload(objectName)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		_, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+	}
+	// double upload
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	err = s.bucket.GetObjectToFile(objectName, "newpic6.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestUploadPartCopyOutofOrder
+func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
+	objectSrc := objectNamePrefix + "tupcoo" + "src"
+	objectDesc := objectNamePrefix + "tupcoo" + "desc"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartSize(fileName, 1024*100)
+	shuffleArray(chunks)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
+	c.Assert(err, IsNil)
+
+	imur, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		_, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+	}
+	//double copy
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	err = s.bucket.GetObjectToFile(objectDesc, "newpic7.jpg")
+	c.Assert(err, IsNil)
+
+	err = s.bucket.DeleteObject(objectSrc)
+	c.Assert(err, IsNil)
+	err = s.bucket.DeleteObject(objectDesc)
+	c.Assert(err, IsNil)
+}
+
+// TestMultipartUpload
+func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileType(c *C) {
+	objectName := objectNamePrefix + "tmuffwm" + ".jpg"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	chunks, err := SplitFileByPartNum(fileName, 4)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	imur, err := s.bucket.InitiateMultipartUpload(objectName)
+	var parts []UploadPart
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, chunk.Number)
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+
+	testLogger.Println("parts:", parts)
+	cmur, err := s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	testLogger.Println("cmur:", cmur)
+
+	err = s.bucket.GetObjectToFile(objectName, "newpic8.jpg")
+	c.Assert(err, IsNil)
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("Content-Type"), Equals, "image/jpeg")
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestListMultipartUploads initiates 20 uploads keyed <prefix>tlmu0..tlmu19
+// and checks the entry counts ListMultipartUploads returns under the
+// max-uploads, prefix, key-marker, upload-id-marker and delimiter filters,
+// then aborts every upload it started.
+func (s *OssBucketMultipartSuite) TestListMultipartUploads(c *C) {
+	objectName := objectNamePrefix + "tlmu"
+
+	imurs := []InitiateMultipartUploadResult{}
+	for i := 0; i < 20; i++ {
+		imur, err := s.bucket.InitiateMultipartUpload(objectName + strconv.Itoa(i))
+		c.Assert(err, IsNil)
+		imurs = append(imurs, imur)
+	}
+
+	lmpu, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 20)
+
+	lmpu, err = s.bucket.ListMultipartUploads(MaxUploads(3))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 3)
+
+	lmpu, err = s.bucket.ListMultipartUploads(Prefix(objectName))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 20)
+
+	// "tlmu1" also prefixes "tlmu10".."tlmu19", so 11 keys match.
+	lmpu, err = s.bucket.ListMultipartUploads(Prefix(objectName + "1"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 11)
+
+	lmpu, err = s.bucket.ListMultipartUploads(Prefix(objectName + "22"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 0)
+
+	// Keys sort lexicographically: after "tlmu10" come "tlmu11".."tlmu19"
+	// and "tlmu2".."tlmu9" — 17 entries.
+	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectName + "10"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 17)
+
+	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectName+"10"), MaxUploads(3))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 3)
+
+	// Keys containing the delimiter "4" are rolled up into CommonPrefixes.
+	lmpu, err = s.bucket.ListMultipartUploads(Prefix(objectName), Delimiter("4"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 18)
+	c.Assert(len(lmpu.CommonPrefixes), Equals, 2)
+
+	// upload-id-marker
+	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectName+"12"), UploadIDMarker("EEE"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 15)
+	//testLogger.Println("UploadIDMarker", lmpu.Uploads)
+
+	for _, imur := range imurs {
+		err = s.bucket.AbortMultipartUpload(imur)
+		c.Assert(err, IsNil)
+	}
+}
+
+// TestListMultipartUploadsEncodingKey lists in-flight uploads whose keys
+// contain non-ASCII characters and verifies that EncodingType("url") still
+// yields the original (decoded) keys.
+func (s *OssBucketMultipartSuite) TestListMultipartUploadsEncodingKey(c *C) {
+	objectName := objectNamePrefix + "让你任性让你狂" + "tlmuek"
+
+	imurs := []InitiateMultipartUploadResult{}
+	for i := 0; i < 3; i++ {
+		imur, err := s.bucket.InitiateMultipartUpload(objectName + strconv.Itoa(i))
+		c.Assert(err, IsNil)
+		imurs = append(imurs, imur)
+	}
+
+	lmpu, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 3)
+
+	lmpu, err = s.bucket.ListMultipartUploads(Prefix(objectNamePrefix + "让你任性让你狂tlmuek1"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 1)
+
+	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectNamePrefix + "让你任性让你狂tlmuek1"))
+	c.Assert(err, IsNil)
+	c.Assert(len(lmpu.Uploads), Equals, 1)
+
+	// When url encoding is requested the SDK must hand back decoded keys.
+	lmpu, err = s.bucket.ListMultipartUploads(EncodingType("url"))
+	c.Assert(err, IsNil)
+	for i, upload := range lmpu.Uploads {
+		c.Assert(upload.Key, Equals, objectNamePrefix+"让你任性让你狂tlmuek"+strconv.Itoa(i))
+	}
+
+	for _, imur := range imurs {
+		err = s.bucket.AbortMultipartUpload(imur)
+		c.Assert(err, IsNil)
+	}
+}
+
+// TestMultipartNegative exercises the multipart APIs with bad inputs: an
+// oversized key, a zero-value imur left by a failed initiate, out-of-range
+// part numbers, and an unrelated option.
+func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
+	objectName := objectNamePrefix + "tmn"
+
+	// key too long: a 100KB key exceeds the maximum key length
+	data := make([]byte, 100*1024)
+	imur, err := s.bucket.InitiateMultipartUpload(string(data))
+	c.Assert(err, NotNil)
+
+	// imur invalid: the initiate above failed, so imur is the zero value
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	fd, err := os.Open(fileName)
+	c.Assert(err, IsNil)
+	defer fd.Close()
+
+	_, err = s.bucket.UploadPart(imur, fd, 1024, 1)
+	c.Assert(err, NotNil)
+
+	_, err = s.bucket.UploadPartFromFile(imur, fileName, 0, 1024, 1)
+	c.Assert(err, NotNil)
+
+	_, err = s.bucket.UploadPartCopy(imur, bucketName, fileName, 0, 1024, 1)
+	c.Assert(err, NotNil)
+
+	err = s.bucket.AbortMultipartUpload(imur)
+	c.Assert(err, NotNil)
+
+	_, err = s.bucket.ListUploadedParts(imur)
+	c.Assert(err, NotNil)
+
+	// valid imur, invalid part parameters
+	imur, err = s.bucket.InitiateMultipartUpload(objectName)
+	c.Assert(err, IsNil)
+
+	_, err = s.bucket.UploadPart(imur, fd, 1024, 1)
+	c.Assert(err, IsNil)
+
+	// part number 10001 exceeds the allowed range
+	_, err = s.bucket.UploadPart(imur, fd, 102400, 10001)
+	c.Assert(err, NotNil)
+
+	//    _, err = s.bucket.UploadPartFromFile(imur, fileName, 0, 1024, 1)
+	//    c.Assert(err, IsNil)
+
+	_, err = s.bucket.UploadPartFromFile(imur, fileName, 0, 102400, 10001)
+	c.Assert(err, NotNil)
+
+	_, err = s.bucket.UploadPartCopy(imur, bucketName, fileName, 0, 1024, 1)
+	c.Assert(err, NotNil)
+
+	_, err = s.bucket.UploadPartCopy(imur, bucketName, fileName, 0, 1024, 1000)
+	c.Assert(err, NotNil)
+
+	err = s.bucket.AbortMultipartUpload(imur)
+	c.Assert(err, IsNil)
+
+	// an unrelated option is tolerated by InitiateMultipartUpload
+	_, err = s.bucket.InitiateMultipartUpload(objectName, IfModifiedSince(futureDate))
+	c.Assert(err, IsNil)
+}
+
+// TestMultipartUploadFromFileBigFile uploads a local big file in 64 parts,
+// downloads it back and verifies both copies match. The test returns silently
+// when the fixture file does not exist on this machine.
+func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileBigFile(c *C) {
+	objectName := objectNamePrefix + "tmuffbf"
+	bigFile := "D:\\tmp\\bigfile.zip"
+	newFile := "D:\\tmp\\newbigfile.zip"
+
+	exist, err := isFileExist(bigFile)
+	c.Assert(err, IsNil)
+	if !exist {
+		return
+	}
+
+	chunks, err := SplitFileByPartNum(bigFile, 64)
+	c.Assert(err, IsNil)
+	testLogger.Println("chunks:", chunks)
+
+	imur, err := s.bucket.InitiateMultipartUpload(objectName)
+	// Was unchecked: a failed initiate previously made every later failure
+	// look like an upload-part problem.
+	c.Assert(err, IsNil)
+	var parts []UploadPart
+	start := GetNowSec()
+	for _, chunk := range chunks {
+		part, err := s.bucket.UploadPartFromFile(imur, bigFile, chunk.Offset, chunk.Size, (int)(chunk.Number))
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+	end := GetNowSec()
+	testLogger.Println("Upload big file:", bigFile, "use sec:", end-start)
+
+	testLogger.Println("parts:", parts)
+	_, err = s.bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+
+	start = GetNowSec()
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+	end = GetNowSec()
+	testLogger.Println("Download big file:", bigFile, "use sec:", end-start)
+
+	start = GetNowSec()
+	eq, err := compareFiles(bigFile, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+	end = GetNowSec()
+	testLogger.Println("Compare big file:", bigFile, "use sec:", end-start)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestUploadFile exercises multipart UploadFile with part sizes that leave a
+// remainder, divide the file evenly, equal the file size and exceed it, then
+// with extra options. Each round downloads the object back and compares it
+// byte-for-byte with the source.
+func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
+	objectName := objectNamePrefix + "tuff"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "newfiletuff.jpg"
+
+	// part size leaving a remainder
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// part size dividing the file evenly (the fixture is 482048 bytes)
+	err = s.bucket.UploadFile(objectName, fileName, 482048/4)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// part size equal to the file size
+	err = s.bucket.UploadFile(objectName, fileName, 482048)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// part size larger than the file size
+	err = s.bucket.UploadFile(objectName, fileName, 482049)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// with extra options
+	options := []Option{
+		Expires(futureDate),
+		ObjectACL(ACLPublicRead),
+		Meta("myprop", "mypropval")}
+	err = s.bucket.UploadFile(objectName, fileName, 482049, options...)
+	c.Assert(err, IsNil)
+
+	// Check
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	acl, err := s.bucket.GetObjectACL(objectName)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectAcl:", acl)
+	// NOTE(review): ObjectACL(ACLPublicRead) is passed above yet the ACL reads
+	// back as "default" — confirm whether UploadFile is expected to forward
+	// object-ACL options.
+	c.Assert(acl.ACL, Equals, "default")
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
+}
+
+// TestUploadFileNegative drives UploadFile with invalid part sizes, a missing
+// local file and an empty key, expecting an error in every case.
+func (s *OssBucketMultipartSuite) TestUploadFileNegative(c *C) {
+	objectName := objectNamePrefix + "tufn"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+
+	// part size below the 100KB minimum
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024-1)
+	c.Assert(err, NotNil)
+
+	// part size above the 5GB maximum
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*5+1)
+	c.Assert(err, NotNil)
+
+	// local file does not exist
+	err = s.bucket.UploadFile(objectName, "/root/123abc9874", 1024*1024*1024)
+	c.Assert(err, NotNil)
+
+	// invalid (empty) object key
+	err = s.bucket.UploadFile("", fileName, 100*1024)
+	c.Assert(err, NotNil)
+}
+
+// TestDownloadFile exercises multipart DownloadFile with part sizes that
+// leave a remainder, divide the object evenly, equal its size and exceed it,
+// plus If-Match / If-None-Match preconditions. Each round compares the
+// download against the original upload source.
+func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
+	objectName := objectNamePrefix + "tdff"
+	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "newfiletdff.jpg"
+
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
+	c.Assert(err, IsNil)
+
+	// part size leaving a remainder
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// part size dividing the object evenly
+	err = s.bucket.DownloadFile(objectName, newFile, 482048/4)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// part size equal to the object size
+	err = s.bucket.DownloadFile(objectName, newFile, 482048)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// part size larger than the object size
+	err = s.bucket.DownloadFile(objectName, newFile, 482049)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// with preconditions
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+
+	// If-Match with the current ETag succeeds
+	err = s.bucket.DownloadFile(objectName, newFile, 482048/4, IfMatch(meta.Get("Etag")))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	// If-None-Match with the current ETag must fail
+	err = s.bucket.DownloadFile(objectName, newFile, 482048, IfNoneMatch(meta.Get("Etag")))
+	c.Assert(err, NotNil)
+
+	os.Remove(newFile)
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestDownloadFileNegative drives DownloadFile with invalid part sizes, an
+// unwritable destination and a nonexistent key, expecting an error each time.
+// NOTE(review): the "tufn" suffix duplicates TestUploadFileNegative's key;
+// "tdfn" was probably intended — confirm.
+func (s *OssBucketMultipartSuite) TestDownloadFileNegative(c *C) {
+	objectName := objectNamePrefix + "tufn"
+	newFile := "newfiletudff.jpg"
+
+	// part size below the 100KB minimum
+	err := s.bucket.DownloadFile(objectName, newFile, 100*1024-1)
+	c.Assert(err, NotNil)
+
+	// part size above the 1GB maximum
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024+1)
+	c.Assert(err, NotNil)
+
+	// destination is a directory, not a writable file
+	err = s.bucket.DownloadFile(objectName, "D:\\work\\oss\\", 1024*1024*1024+1)
+	c.Assert(err, NotNil)
+
+	// object key does not exist
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024)
+	c.Assert(err, NotNil)
+}
+
+// private
+// shuffleArray permutes chunks in place (Fisher–Yates, ascending variant) and
+// returns the same slice for convenience. Randomness comes from math/rand's
+// global source, so the order is only reproducible if the caller seeds it.
+func shuffleArray(chunks []FileChunk) []FileChunk {
+	for i := range chunks {
+		j := rand.Intn(i + 1)
+		chunks[i], chunks[j] = chunks[j], chunks[i]
+	}
+	return chunks
+}

+ 346 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go

@@ -0,0 +1,346 @@
+package oss
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"time"
+)
+
+// optionType classifies where an option value is applied.
+type optionType string
+
+const (
+	optionParam optionType = "HTTPParameter" // URL query parameter
+	optionHTTP  optionType = "HTTPHeader"    // HTTP header
+	optionArg   optionType = "FuncArgument"  // function argument
+)
+
+// Keys for optionArg entries; these are read back via findOption/isOptionSet
+// rather than serialized into the request.
+const (
+	deleteObjectsQuiet = "delete-objects-quiet"
+	routineNum         = "x-routine-num"
+	checkpointConfig   = "x-cp-config"
+	initCRC64          = "init-crc64"
+)
+
+type (
+	// optionValue is one recorded option: its serialized value and where it
+	// is applied.
+	optionValue struct {
+		Value string
+		Type  optionType
+	}
+
+	// Option records a single request option into the given map; it returns
+	// an error when the option cannot be applied.
+	Option func(map[string]optionValue) error
+)
+
+// ACL is an option to set X-Oss-Acl header
+func ACL(acl ACLType) Option {
+	return setHeader(HTTPHeaderOssACL, string(acl))
+}
+
+// ContentType is an option to set Content-Type header
+func ContentType(value string) Option {
+	return setHeader(HTTPHeaderContentType, value)
+}
+
+// ContentLength is an option to set Content-Length header
+func ContentLength(length int64) Option {
+	return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
+}
+
+// CacheControl is an option to set Cache-Control header
+func CacheControl(value string) Option {
+	return setHeader(HTTPHeaderCacheControl, value)
+}
+
+// ContentDisposition is an option to set Content-Disposition header
+func ContentDisposition(value string) Option {
+	return setHeader(HTTPHeaderContentDisposition, value)
+}
+
+// ContentEncoding is an option to set Content-Encoding header
+func ContentEncoding(value string) Option {
+	return setHeader(HTTPHeaderContentEncoding, value)
+}
+
+// ContentMD5 is an option to set Content-MD5 header
+func ContentMD5(value string) Option {
+	return setHeader(HTTPHeaderContentMD5, value)
+}
+
+// Expires is an option to set Expires header
+func Expires(t time.Time) Option {
+	return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
+}
+
+// Meta is an option to set Meta header
+func Meta(key, value string) Option {
+	return setHeader(HTTPHeaderOssMetaPrefix+key, value)
+}
+
+// Range is an option to set Range header, [start, end]
+func Range(start, end int64) Option {
+	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
+}
+
+// AcceptEncoding is an option to set Accept-Encoding header
+func AcceptEncoding(value string) Option {
+	return setHeader(HTTPHeaderAcceptEncoding, value)
+}
+
+// IfModifiedSince is an option to set If-Modified-Since header
+func IfModifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
+}
+
+// IfUnmodifiedSince is an option to set If-Unmodified-Since header
+func IfUnmodifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// IfMatch is an option to set If-Match header
+func IfMatch(value string) Option {
+	return setHeader(HTTPHeaderIfMatch, value)
+}
+
+// IfNoneMatch is an option to set IfNoneMatch header
+func IfNoneMatch(value string) Option {
+	return setHeader(HTTPHeaderIfNoneMatch, value)
+}
+
+// CopySource is an option to set X-Oss-Copy-Source header
+func CopySource(sourceBucket, sourceObject string) Option {
+	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
+}
+
+// CopySourceRange is an option to set X-Oss-Copy-Source header
+func CopySourceRange(startPosition, partSize int64) Option {
+	val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
+		strconv.FormatInt((startPosition+partSize-1), 10)
+	return setHeader(HTTPHeaderOssCopySourceRange, val)
+}
+
+// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
+func CopySourceIfMatch(value string) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
+}
+
+// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
+func CopySourceIfNoneMatch(value string) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
+}
+
+// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header
+func CopySourceIfModifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
+}
+
+// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
+func CopySourceIfUnmodifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// MetadataDirective is an option to set X-Oss-Metadata-Directive header
+func MetadataDirective(directive MetadataDirectiveType) Option {
+	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
+}
+
+// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
+func ServerSideEncryption(value string) Option {
+	return setHeader(HTTPHeaderOssServerSideEncryption, value)
+}
+
+// ObjectACL is an option to set X-Oss-Object-Acl header
+func ObjectACL(acl ACLType) Option {
+	return setHeader(HTTPHeaderOssObjectACL, string(acl))
+}
+
+// Origin is an option to set Origin header
+func Origin(value string) Option {
+	return setHeader(HTTPHeaderOrigin, value)
+}
+
+// Delimiter is an option to set delimiler parameter
+func Delimiter(value string) Option {
+	return addParam("delimiter", value)
+}
+
+// Marker is an option to set marker parameter
+func Marker(value string) Option {
+	return addParam("marker", value)
+}
+
+// MaxKeys is an option to set maxkeys parameter
+func MaxKeys(value int) Option {
+	return addParam("max-keys", strconv.Itoa(value))
+}
+
+// Prefix is an option to set prefix parameter
+func Prefix(value string) Option {
+	return addParam("prefix", value)
+}
+
+// EncodingType is an option to set encoding-type parameter
+func EncodingType(value string) Option {
+	return addParam("encoding-type", value)
+}
+
+// MaxUploads is an option to set max-uploads parameter
+func MaxUploads(value int) Option {
+	return addParam("max-uploads", strconv.Itoa(value))
+}
+
+// KeyMarker is an option to set key-marker parameter
+func KeyMarker(value string) Option {
+	return addParam("key-marker", value)
+}
+
+// UploadIDMarker is an option to set upload-id-marker parameter
+func UploadIDMarker(value string) Option {
+	return addParam("upload-id-marker", value)
+}
+
+// DeleteObjectsQuiet selects DeleteObjects' response mode: quiet (true) or
+// verbose (false). Verbose is the default.
+func DeleteObjectsQuiet(isQuiet bool) Option {
+	return addArg(deleteObjectsQuiet, strconv.FormatBool(isQuiet))
+}
+
+// cpConfig is the JSON payload carried by the Checkpoint option.
+type cpConfig struct {
+	IsEnable bool   // whether checkpointing is enabled
+	FilePath string // path of the checkpoint file
+}
+
+// Checkpoint enables or disables resumable checkpoints for
+// DownloadFile/UploadFile and sets the checkpoint file path.
+// json.Marshal cannot fail for cpConfig, so its error is deliberately ignored.
+func Checkpoint(isEnable bool, filePath string) Option {
+	res, _ := json.Marshal(cpConfig{isEnable, filePath})
+	return addArg(checkpointConfig, string(res))
+}
+
+// Routines sets the number of concurrent routines used by
+// DownloadFile/UploadFile.
+func Routines(n int) Option {
+	return addArg(routineNum, strconv.Itoa(n))
+}
+
+// InitCRC sets the initial value for AppendObject's CRC64 verification.
+func InitCRC(initCRC uint64) Option {
+	return addArg(initCRC64, strconv.FormatUint(initCRC, 10))
+}
+
+// setHeader returns an Option that records an HTTP header entry.
+// Empty values are dropped so callers may pass optional headers blindly.
+func setHeader(key, value string) Option {
+	return func(params map[string]optionValue) error {
+		if value != "" {
+			params[key] = optionValue{value, optionHTTP}
+		}
+		return nil
+	}
+}
+
+// addParam returns an Option that records a URL query parameter.
+// A blank value is treated as "option not set".
+func addParam(key, value string) Option {
+	return func(params map[string]optionValue) error {
+		if value != "" {
+			params[key] = optionValue{value, optionParam}
+		}
+		return nil
+	}
+}
+
+// addArg returns an Option that records a function argument consumed by the
+// SDK itself rather than serialized into the request.
+func addArg(key, value string) Option {
+	return func(params map[string]optionValue) error {
+		if value != "" {
+			params[key] = optionValue{value, optionArg}
+		}
+		return nil
+	}
+}
+
+// handleOptions evaluates every option and copies the HTTP-header entries
+// into headers. The first option error aborts and is returned.
+func handleOptions(headers map[string]string, options []Option) error {
+	params := map[string]optionValue{}
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		if err := opt(params); err != nil {
+			return err
+		}
+	}
+
+	for key, val := range params {
+		if val.Type != optionHTTP {
+			continue
+		}
+		headers[key] = val.Value
+	}
+	return nil
+}
+
+// handleParams evaluates every option and serializes the URL-parameter
+// entries into a query string with keys in sorted order, values escaped.
+func handleParams(options []Option) (string, error) {
+	// Collect all option key/values.
+	params := map[string]optionValue{}
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		if err := opt(params); err != nil {
+			return "", err
+		}
+	}
+
+	// Pick out the URL parameters and order them by key.
+	keys := make([]string, 0, len(params))
+	for key, val := range params {
+		if val.Type == optionParam {
+			keys = append(keys, key)
+		}
+	}
+	sort.Strings(keys)
+
+	// Serialize as an escaped "k=v&k=v" query string.
+	var buf bytes.Buffer
+	for _, key := range keys {
+		if buf.Len() != 0 {
+			buf.WriteByte('&')
+		}
+		buf.WriteString(url.QueryEscape(key))
+		buf.WriteByte('=')
+		buf.WriteString(url.QueryEscape(params[key].Value))
+	}
+
+	return buf.String(), nil
+}
+
+// findOption evaluates the options and returns the value recorded under
+// param, falling back to defaultVal when the key was never set.
+func findOption(options []Option, param, defaultVal string) (string, error) {
+	params := map[string]optionValue{}
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		if err := opt(params); err != nil {
+			return "", err
+		}
+	}
+
+	val, ok := params[param]
+	if !ok {
+		return defaultVal, nil
+	}
+	return val.Value, nil
+}
+
+// isOptionSet reports whether the option named by the second argument was
+// supplied in options, returning its recorded value when present. The first
+// option error aborts and is returned.
+func isOptionSet(options []Option, option string) (bool, string, error) {
+	params := map[string]optionValue{}
+	// The loop variable was previously also named "option", shadowing the
+	// string parameter inside the loop body; use a distinct name.
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		if err := opt(params); err != nil {
+			return false, "", err
+		}
+	}
+
+	if val, ok := params[option]; ok {
+		return true, val.Value, nil
+	}
+	return false, "", nil
+}

+ 251 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/option_test.go

@@ -0,0 +1,251 @@
+package oss
+
+import (
+	"net/http"
+
+	. "gopkg.in/check.v1"
+)
+
+type OssOptionSuite struct{}
+
+var _ = Suite(&OssOptionSuite{})
+
+type optionTestCase struct {
+	option Option
+	key    string
+	value  string
+}
+
+var headerTestcases = []optionTestCase{
+	{
+		option: Meta("User", "baymax"),
+		key:    "X-Oss-Meta-User",
+		value:  "baymax",
+	},
+	{
+		option: ACL(ACLPrivate),
+		key:    "X-Oss-Acl",
+		value:  "private",
+	},
+	{
+		option: ContentType("plain/text"),
+		key:    "Content-Type",
+		value:  "plain/text",
+	},
+	{
+		option: CacheControl("no-cache"),
+		key:    "Cache-Control",
+		value:  "no-cache",
+	},
+	{
+		option: ContentDisposition("Attachment; filename=example.txt"),
+		key:    "Content-Disposition",
+		value:  "Attachment; filename=example.txt",
+	},
+	{
+		option: ContentEncoding("gzip"),
+		key:    "Content-Encoding",
+		value:  "gzip",
+	},
+	{
+		option: Expires(pastDate),
+		key:    "Expires",
+		value:  pastDate.Format(http.TimeFormat),
+	},
+	{
+		option: Range(0, 9),
+		key:    "Range",
+		value:  "bytes=0-9",
+	},
+	{
+		option: Origin("localhost"),
+		key:    "Origin",
+		value:  "localhost",
+	},
+	{
+		option: CopySourceRange(0, 9),
+		key:    "X-Oss-Copy-Source-Range",
+		value:  "bytes=0-8",
+	},
+	{
+		option: IfModifiedSince(pastDate),
+		key:    "If-Modified-Since",
+		value:  pastDate.Format(http.TimeFormat),
+	},
+	{
+		option: IfUnmodifiedSince(futureDate),
+		key:    "If-Unmodified-Since",
+		value:  futureDate.Format(http.TimeFormat),
+	},
+	{
+		option: IfMatch("xyzzy"),
+		key:    "If-Match",
+		value:  "xyzzy",
+	},
+	{
+		option: IfNoneMatch("xyzzy"),
+		key:    "If-None-Match",
+		value:  "xyzzy",
+	},
+	{
+		option: CopySource("bucket_name", "object_name"),
+		key:    "X-Oss-Copy-Source",
+		value:  "/bucket_name/object_name",
+	},
+	{
+		option: CopySourceIfModifiedSince(pastDate),
+		key:    "X-Oss-Copy-Source-If-Modified-Since",
+		value:  pastDate.Format(http.TimeFormat),
+	},
+	{
+		option: CopySourceIfUnmodifiedSince(futureDate),
+		key:    "X-Oss-Copy-Source-If-Unmodified-Since",
+		value:  futureDate.Format(http.TimeFormat),
+	},
+	{
+		option: CopySourceIfMatch("xyzzy"),
+		key:    "X-Oss-Copy-Source-If-Match",
+		value:  "xyzzy",
+	},
+	{
+		option: CopySourceIfNoneMatch("xyzzy"),
+		key:    "X-Oss-Copy-Source-If-None-Match",
+		value:  "xyzzy",
+	},
+	{
+		option: MetadataDirective(MetaCopy),
+		key:    "X-Oss-Metadata-Directive",
+		value:  "COPY",
+	},
+	{
+		option: ServerSideEncryption("AES256"),
+		key:    "X-Oss-Server-Side-Encryption",
+		value:  "AES256",
+	},
+	{
+		option: ObjectACL(ACLPrivate),
+		key:    "X-Oss-Object-Acl",
+		value:  "private",
+	},
+}
+
+func (s *OssOptionSuite) TestHeaderOptions(c *C) {
+	for _, testcase := range headerTestcases {
+		headers := make(map[string]optionValue)
+		err := testcase.option(headers)
+		c.Assert(err, IsNil)
+
+		expected, actual := testcase.value, headers[testcase.key].Value
+		c.Assert(expected, Equals, actual)
+	}
+}
+
+var paramTestCases = []optionTestCase{
+	{
+		option: Delimiter("/"),
+		key:    "delimiter",
+		value:  "/",
+	},
+	{
+		option: Marker("abc"),
+		key:    "marker",
+		value:  "abc",
+	},
+	{
+		option: MaxKeys(150),
+		key:    "max-keys",
+		value:  "150",
+	},
+	{
+		option: Prefix("fun"),
+		key:    "prefix",
+		value:  "fun",
+	},
+	{
+		option: EncodingType("ascii"),
+		key:    "encoding-type",
+		value:  "ascii",
+	},
+	{
+		option: MaxUploads(100),
+		key:    "max-uploads",
+		value:  "100",
+	},
+	{
+		option: KeyMarker("abc"),
+		key:    "key-marker",
+		value:  "abc",
+	},
+	{
+		option: UploadIDMarker("xyz"),
+		key:    "upload-id-marker",
+		value:  "xyz",
+	},
+}
+
+func (s *OssOptionSuite) TestParamOptions(c *C) {
+	for _, testcase := range paramTestCases {
+		params := make(map[string]optionValue)
+		err := testcase.option(params)
+		c.Assert(err, IsNil)
+
+		expected, actual := testcase.value, params[testcase.key].Value
+		c.Assert(expected, Equals, actual)
+	}
+}
+
+func (s *OssOptionSuite) TestHandleOptions(c *C) {
+	headers := make(map[string]string)
+	options := []Option{}
+
+	for _, testcase := range headerTestcases {
+		options = append(options, testcase.option)
+	}
+
+	err := handleOptions(headers, options)
+	c.Assert(err, IsNil)
+
+	for _, testcase := range headerTestcases {
+		expected, actual := testcase.value, headers[testcase.key]
+		c.Assert(expected, Equals, actual)
+	}
+
+	options = []Option{IfMatch(""), nil}
+	headers = map[string]string{}
+	err = handleOptions(headers, options)
+	c.Assert(err, IsNil)
+	c.Assert(len(headers), Equals, 0)
+}
+
+// TestHandleParams serializes every entry of paramTestCases and checks the
+// resulting query string, then verifies empty/nil options are ignored.
+func (s *OssOptionSuite) TestHandleParams(c *C) {
+	options := []Option{}
+
+	for _, testcase := range paramTestCases {
+		options = append(options, testcase.option)
+	}
+
+	out, err := handleParams(options)
+	c.Assert(err, IsNil)
+	// 120 is the byte length of the query string built from paramTestCases —
+	// brittle: update this constant whenever the cases change.
+	c.Assert(len(out), Equals, 120)
+
+	// Empty-valued and nil options must contribute nothing.
+	options = []Option{KeyMarker(""), nil}
+	out, err = handleParams(options)
+	c.Assert(out, Equals, "")
+	c.Assert(err, IsNil)
+}
+
+func (s *OssOptionSuite) TestFindOption(c *C) {
+	options := []Option{}
+
+	for _, testcase := range headerTestcases {
+		options = append(options, testcase.option)
+	}
+
+	str, err := findOption(options, "X-Oss-Acl", "")
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, "private")
+
+	str, err = findOption(options, "MyProp", "")
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, "")
+}

+ 442 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go

@@ -0,0 +1,442 @@
+package oss
+
+import (
+	"encoding/xml"
+	"net/url"
+	"time"
+)
+
+// ListBucketsResult is the result returned by a ListBuckets request.
+type ListBucketsResult struct {
+	XMLName     xml.Name           `xml:"ListAllMyBucketsResult"`
+	Prefix      string             `xml:"Prefix"`         // prefix used for this query
+	Marker      string             `xml:"Marker"`         // query start point; present when the listing is truncated
+	MaxKeys     int                `xml:"MaxKeys"`        // maximum number of results; present when the listing is truncated
+	IsTruncated bool               `xml:"IsTruncated"`    // whether all results have been returned
+	NextMarker  string             `xml:"NextMarker"`     // start point for the next query
+	Owner       Owner              `xml:"Owner"`          // owner information
+	Buckets     []BucketProperties `xml:"Buckets>Bucket"` // list of buckets
+}
+
+// BucketProperties describes a single bucket.
+type BucketProperties struct {
+	XMLName      xml.Name  `xml:"Bucket"`
+	Name         string    `xml:"Name"`         // bucket name
+	Location     string    `xml:"Location"`     // data center the bucket resides in
+	CreationDate time.Time `xml:"CreationDate"` // bucket creation time
+}
+
+// GetBucketACLResult is the result returned by a GetBucketACL request.
+type GetBucketACLResult struct {
+	XMLName xml.Name `xml:"AccessControlPolicy"`
+	ACL     string   `xml:"AccessControlList>Grant"` // bucket ACL
+	Owner   Owner    `xml:"Owner"`                   // bucket owner information
+}
+
+// LifecycleConfiguration is a bucket's lifecycle configuration.
+type LifecycleConfiguration struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []LifecycleRule `xml:"Rule"`
+}
+
+// LifecycleRule is a single lifecycle rule.
+type LifecycleRule struct {
+	XMLName    xml.Name            `xml:"Rule"`
+	ID         string              `xml:"ID"`         // unique rule ID
+	Prefix     string              `xml:"Prefix"`     // object key prefix the rule applies to
+	Status     string              `xml:"Status"`     // whether the rule is in effect ("Enabled"/"Disabled")
+	Expiration LifecycleExpiration `xml:"Expiration"` // expiration attributes of the rule
+}
+
+// LifecycleExpiration describes when a lifecycle rule takes effect.
+type LifecycleExpiration struct {
+	XMLName xml.Name  `xml:"Expiration"`
+	Days    int       `xml:"Days,omitempty"` // days after last modification before the rule applies
+	Date    time.Time `xml:"Date,omitempty"` // absolute date at which the rule applies
+}
+
+// lifecycleXML is the wire representation of LifecycleConfiguration;
+// it differs only in that Expiration.Date is a pre-formatted string.
+type lifecycleXML struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []lifecycleRule `xml:"Rule"`
+}
+
+// lifecycleRule is the wire representation of LifecycleRule.
+type lifecycleRule struct {
+	XMLName    xml.Name            `xml:"Rule"`
+	ID         string              `xml:"ID"`
+	Prefix     string              `xml:"Prefix"`
+	Status     string              `xml:"Status"`
+	Expiration lifecycleExpiration `xml:"Expiration"`
+}
+
+// lifecycleExpiration is the wire representation of LifecycleExpiration.
+type lifecycleExpiration struct {
+	XMLName xml.Name `xml:"Expiration"`
+	Days    int      `xml:"Days,omitempty"`
+	Date    string   `xml:"Date,omitempty"` // formatted with expirationDateFormat
+}
+
+// expirationDateFormat is the timestamp layout OSS expects for rule dates.
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+
+// convLifecycleRule converts exported LifecycleRule values into the internal
+// lifecycleRule form used for XML serialization. A rule whose Date is the
+// zero time is serialized by Days; otherwise the Date is rendered with
+// expirationDateFormat.
+func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
+	converted := make([]lifecycleRule, 0, len(rules))
+	for _, src := range rules {
+		dst := lifecycleRule{
+			ID:     src.ID,
+			Prefix: src.Prefix,
+			Status: src.Status,
+		}
+		if src.Expiration.Date.IsZero() {
+			dst.Expiration.Days = src.Expiration.Days
+		} else {
+			dst.Expiration.Date = src.Expiration.Date.Format(expirationDateFormat)
+		}
+		converted = append(converted, dst)
+	}
+	return converted
+}
+
+// BuildLifecycleRuleByDays 指定过期天数构建Lifecycle规则
+func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: LifecycleExpiration{Days: days}}
+}
+
+// BuildLifecycleRuleByDate 指定过期时间构建Lifecycle规则
+func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: LifecycleExpiration{Date: date}}
+}
+
+// GetBucketLifecycleResult is the result of a GetBucketLifecycle request.
+type GetBucketLifecycleResult LifecycleConfiguration
+
+// RefererXML is a bucket's referer (hotlink protection) configuration.
+type RefererXML struct {
+	XMLName           xml.Name `xml:"RefererConfiguration"`
+	AllowEmptyReferer bool     `xml:"AllowEmptyReferer"`   // whether requests with an empty Referer are allowed
+	RefererList       []string `xml:"RefererList>Referer"` // referer whitelist
+}
+
+// GetBucketRefererResult is the result of a GetBucketReferer request.
+type GetBucketRefererResult RefererXML
+
+// LoggingXML is a bucket's access-logging configuration.
+type LoggingXML struct {
+	XMLName        xml.Name       `xml:"BucketLoggingStatus"`
+	LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // access-log settings container
+}
+
+// loggingXMLEmpty is serialized to disable logging (no LoggingEnabled node).
+type loggingXMLEmpty struct {
+	XMLName xml.Name `xml:"BucketLoggingStatus"`
+}
+
+// LoggingEnabled holds the access-log settings.
+type LoggingEnabled struct {
+	XMLName      xml.Name `xml:"LoggingEnabled"`
+	TargetBucket string   `xml:"TargetBucket"` // bucket that stores the access logs
+	TargetPrefix string   `xml:"TargetPrefix"` // key prefix for the access-log files
+}
+
+// GetBucketLoggingResult is the result of a GetBucketLogging request.
+type GetBucketLoggingResult LoggingXML
+
+// WebsiteXML is a bucket's static-website configuration.
+type WebsiteXML struct {
+	XMLName       xml.Name      `xml:"WebsiteConfiguration"`
+	IndexDocument IndexDocument `xml:"IndexDocument"` // index file served for directory URLs
+	ErrorDocument ErrorDocument `xml:"ErrorDocument"` // file served on 404 errors
+}
+
+// IndexDocument names the index file served for directory URLs.
+type IndexDocument struct {
+	XMLName xml.Name `xml:"IndexDocument"`
+	Suffix  string   `xml:"Suffix"` // index file name appended to directory URLs
+}
+
+// ErrorDocument names the file served on 404 errors.
+type ErrorDocument struct {
+	XMLName xml.Name `xml:"ErrorDocument"`
+	Key     string   `xml:"Key"` // object key served on 404 errors
+}
+
+// GetBucketWebsiteResult is the result of a GetBucketWebsite request.
+type GetBucketWebsiteResult WebsiteXML
+
+// CORSXML is a bucket's CORS configuration.
+type CORSXML struct {
+	XMLName   xml.Name   `xml:"CORSConfiguration"`
+	CORSRules []CORSRule `xml:"CORSRule"` // list of CORS rules
+}
+
+// CORSRule is a single CORS rule.
+type CORSRule struct {
+	XMLName       xml.Name `xml:"CORSRule"`
+	AllowedOrigin []string `xml:"AllowedOrigin"` // allowed origins; "*" matches any
+	AllowedMethod []string `xml:"AllowedMethod"` // allowed HTTP methods
+	AllowedHeader []string `xml:"AllowedHeader"` // allowed request headers
+	ExposeHeader  []string `xml:"ExposeHeader"`  // response headers exposed to the client
+	MaxAgeSeconds int      `xml:"MaxAgeSeconds"` // maximum preflight cache time
+}
+
+// GetBucketCORSResult is the result of a GetBucketCORS request.
+type GetBucketCORSResult CORSXML
+
+// GetBucketInfoResult is the result of a GetBucketInfo request.
+type GetBucketInfoResult struct {
+	XMLName    xml.Name   `xml:"BucketInfo"`
+	BucketInfo BucketInfo `xml:"Bucket"` // bucket information
+}
+
+// BucketInfo describes a bucket in detail.
+type BucketInfo struct {
+	XMLName          xml.Name  `xml:"Bucket"`
+	Name             string    `xml:"Name"`                    // bucket name
+	Location         string    `xml:"Location"`                // data center the bucket resides in
+	CreationDate     time.Time `xml:"CreationDate"`            // bucket creation time
+	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`        // public (internet) access endpoint
+	IntranetEndpoint string    `xml:"IntranetEndpoint"`        // internal (intranet) access endpoint
+	ACL              string    `xml:"AccessControlList>Grant"` // bucket ACL
+	Owner            Owner     `xml:"Owner"`                   // bucket owner information
+}
+
+// ListObjectsResult is the result returned by a ListObjects request.
+type ListObjectsResult struct {
+	XMLName        xml.Name           `xml:"ListBucketResult"`
+	Prefix         string             `xml:"Prefix"`                // prefix used for this query
+	Marker         string             `xml:"Marker"`                // start point of this query
+	MaxKeys        int                `xml:"MaxKeys"`               // maximum number of results requested
+	Delimiter      string             `xml:"Delimiter"`             // character used to group object names
+	IsTruncated    bool               `xml:"IsTruncated"`           // whether all results have been returned
+	NextMarker     string             `xml:"NextMarker"`            // start point for the next query
+	Objects        []ObjectProperties `xml:"Contents"`              // returned objects
+	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // common prefixes of objects grouped by the delimiter
+}
+
+// ObjectProperties describes a single object.
+type ObjectProperties struct {
+	XMLName      xml.Name  `xml:"Contents"`
+	Key          string    `xml:"Key"`          // object key
+	Type         string    `xml:"Type"`         // object type
+	Size         int64     `xml:"Size"`         // object size in bytes
+	ETag         string    `xml:"ETag"`         // entity tag identifying the object content
+	Owner        Owner     `xml:"Owner"`        // object owner information
+	LastModified time.Time `xml:"LastModified"` // last modification time
+	StorageClass string    `xml:"StorageClass"` // storage class (currently only Standard)
+}
+
+// Owner identifies the owner of a bucket or object.
+type Owner struct {
+	XMLName     xml.Name `xml:"Owner"`
+	ID          string   `xml:"ID"`          // user ID
+	DisplayName string   `xml:"DisplayName"` // owner display name
+}
+
+// CopyObjectResult is the result returned by a CopyObject request.
+type CopyObjectResult struct {
+	XMLName      xml.Name  `xml:"CopyObjectResult"`
+	LastModified time.Time `xml:"LastModified"` // last modification time of the new object
+	ETag         string    `xml:"ETag"`         // ETag of the new object
+}
+
+// GetObjectACLResult is the result returned by a GetObjectACL request.
+type GetObjectACLResult GetBucketACLResult
+
+// deleteXML is the request body of a DeleteObjects (batch delete) call.
+type deleteXML struct {
+	XMLName xml.Name       `xml:"Delete"`
+	Objects []DeleteObject `xml:"Object"` // all objects to delete
+	Quiet   bool           `xml:"Quiet"`  // quiet response mode
+}
+
+// DeleteObject names one object to delete.
+type DeleteObject struct {
+	XMLName xml.Name `xml:"Object"`
+	Key     string   `xml:"Key"` // object key
+}
+
+// DeleteObjectsResult is the result returned by a DeleteObjects request.
+type DeleteObjectsResult struct {
+	XMLName        xml.Name `xml:"DeleteResult"`
+	DeletedObjects []string `xml:"Deleted>Key"` // keys of the deleted objects
+}
+
+// InitiateMultipartUploadResult is the result of an InitiateMultipartUpload request.
+type InitiateMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
+	Bucket   string   `xml:"Bucket"`   // bucket name
+	Key      string   `xml:"Key"`      // key of the object being uploaded
+	UploadID string   `xml:"UploadId"` // generated upload ID
+}
+
+// UploadPart identifies one uploaded/copied part.
+type UploadPart struct {
+	XMLName    xml.Name `xml:"Part"`
+	PartNumber int      `xml:"PartNumber"` // part number
+	ETag       string   `xml:"ETag"`       // part ETag
+}
+
+// uploadParts implements sort.Interface, ordering parts by PartNumber.
+type uploadParts []UploadPart
+
+// Len reports the number of parts.
+func (slice uploadParts) Len() int {
+	return len(slice)
+}
+
+// Less orders parts by ascending PartNumber.
+func (slice uploadParts) Less(i, j int) bool {
+	return slice[i].PartNumber < slice[j].PartNumber
+}
+
+// Swap exchanges two parts.
+func (slice uploadParts) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+// UploadPartCopyResult is the result of a part-copy request.
+type UploadPartCopyResult struct {
+	XMLName      xml.Name  `xml:"CopyPartResult"`
+	LastModified time.Time `xml:"LastModified"` // last modification time
+	ETag         string    `xml:"ETag"`         // ETag
+}
+
+// completeMultipartUploadXML is the request body of CompleteMultipartUpload.
+type completeMultipartUploadXML struct {
+	XMLName xml.Name     `xml:"CompleteMultipartUpload"`
+	Part    []UploadPart `xml:"Part"`
+}
+
+// CompleteMultipartUploadResult is the result of committing a multipart upload.
+type CompleteMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
+	Location string   `xml:"Location"` // URL of the object
+	Bucket   string   `xml:"Bucket"`   // bucket name
+	ETag     string   `xml:"ETag"`     // ETag of the object
+	Key      string   `xml:"Key"`      // key of the object
+}
+
+// ListUploadedPartsResult is the result of a ListUploadedParts request.
+type ListUploadedPartsResult struct {
+	XMLName              xml.Name       `xml:"ListPartsResult"`
+	Bucket               string         `xml:"Bucket"`               // bucket name
+	Key                  string         `xml:"Key"`                  // object key
+	UploadID             string         `xml:"UploadId"`             // upload ID
+	NextPartNumberMarker string         `xml:"NextPartNumberMarker"` // position of the next part
+	MaxParts             int            `xml:"MaxParts"`             // maximum number of parts returned
+	IsTruncated          bool           `xml:"IsTruncated"`          // whether the listing is complete
+	UploadedParts        []UploadedPart `xml:"Part"`                 // parts uploaded so far
+}
+
+// UploadedPart describes one part already uploaded for an upload task.
+type UploadedPart struct {
+	XMLName      xml.Name  `xml:"Part"`
+	PartNumber   int       `xml:"PartNumber"`   // part number
+	LastModified time.Time `xml:"LastModified"` // last modification time
+	ETag         string    `xml:"ETag"`         // part ETag
+	Size         int       `xml:"Size"`         // part size
+}
+
+// ListMultipartUploadResult is the result of a ListMultipartUpload request.
+type ListMultipartUploadResult struct {
+	XMLName            xml.Name            `xml:"ListMultipartUploadsResult"`
+	Bucket             string              `xml:"Bucket"`                // bucket name
+	Delimiter          string              `xml:"Delimiter"`             // grouping delimiter
+	Prefix             string              `xml:"Prefix"`                // filter prefix
+	KeyMarker          string              `xml:"KeyMarker"`             // start object position
+	UploadIDMarker     string              `xml:"UploadIdMarker"`        // start upload-ID position
+	NextKeyMarker      string              `xml:"NextKeyMarker"`         // next KeyMarker when the listing is truncated
+	NextUploadIDMarker string              `xml:"NextUploadIdMarker"`    // next UploadIdMarker when the listing is truncated
+	MaxUploads         int                 `xml:"MaxUploads"`            // maximum number of uploads returned
+	IsTruncated        bool                `xml:"IsTruncated"`           // whether the listing is complete
+	Uploads            []UncompletedUpload `xml:"Upload"`                // uncompleted multipart uploads
+	CommonPrefixes     []string            `xml:"CommonPrefixes>Prefix"` // common prefixes of keys grouped by the delimiter
+}
+
+// UncompletedUpload describes a multipart upload that has not completed.
+type UncompletedUpload struct {
+	XMLName   xml.Name  `xml:"Upload"`
+	Key       string    `xml:"Key"`       // object key
+	UploadID  string    `xml:"UploadId"`  // corresponding upload ID
+	Initiated time.Time `xml:"Initiated"` // initiation time, e.g. 2012-02-23T04:18:23.000Z
+}
+
+// 解析URL编码
+func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
+	var err error
+	for i := 0; i < len(result.DeletedObjects); i++ {
+		result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// 解析URL编码
+func decodeListObjectsResult(result *ListObjectsResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Marker, err = url.QueryUnescape(result.Marker)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.NextMarker, err = url.QueryUnescape(result.NextMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Objects); i++ {
+		result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// 解析URL编码
+func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.KeyMarker, err = url.QueryUnescape(result.KeyMarker)
+	if err != nil {
+		return err
+	}
+	result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Uploads); i++ {
+		result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

+ 127 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/type_test.go

@@ -0,0 +1,127 @@
+package oss
+
+import (
+	"net/url"
+	"sort"
+
+	. "gopkg.in/check.v1"
+)
+
+// OssTypeSuite groups the serialization/decoding unit tests for type.go.
+type OssTypeSuite struct{}
+
+var _ = Suite(&OssTypeSuite{})
+
+// Shared fixtures: a plain string, a Chinese string, and their URL-escaped forms.
+var (
+	goStr     = "go go + go <> go"
+	chnStr    = "试问闲情几许"
+	goURLStr  = url.QueryEscape(goStr)
+	chnURLStr = url.QueryEscape(chnStr)
+)
+
+// TestConvLifecycleRule verifies that convLifecycleRule serializes a
+// date-based rule with a formatted Date (Days zero) and a days-based rule
+// with Days set (Date empty).
+func (s *OssTypeSuite) TestConvLifecycleRule(c *C) {
+	r1 := BuildLifecycleRuleByDate("id1", "one", true, 2015, 11, 11)
+	r2 := BuildLifecycleRuleByDays("id2", "two", false, 3)
+
+	rs := convLifecycleRule([]LifecycleRule{r1})
+	c.Assert(rs[0].ID, Equals, "id1")
+	c.Assert(rs[0].Prefix, Equals, "one")
+	c.Assert(rs[0].Status, Equals, "Enabled")
+	c.Assert(rs[0].Expiration.Date, Equals, "2015-11-11T00:00:00.000Z")
+	c.Assert(rs[0].Expiration.Days, Equals, 0)
+
+	rs = convLifecycleRule([]LifecycleRule{r2})
+	c.Assert(rs[0].ID, Equals, "id2")
+	c.Assert(rs[0].Prefix, Equals, "two")
+	c.Assert(rs[0].Status, Equals, "Disabled")
+	c.Assert(rs[0].Expiration.Date, Equals, "")
+	c.Assert(rs[0].Expiration.Days, Equals, 3)
+}
+
+// TestDecodeDeleteObjectsResult verifies decodeDeleteObjectsResult on an
+// empty result, an empty key, and URL-escaped ASCII/Chinese keys.
+func (s *OssTypeSuite) TestDecodeDeleteObjectsResult(c *C) {
+	var res DeleteObjectsResult
+	err := decodeDeleteObjectsResult(&res)
+	c.Assert(err, IsNil)
+
+	res.DeletedObjects = []string{""}
+	err = decodeDeleteObjectsResult(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res.DeletedObjects[0], Equals, "")
+
+	res.DeletedObjects = []string{goURLStr, chnURLStr}
+	err = decodeDeleteObjectsResult(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res.DeletedObjects[0], Equals, goStr)
+	c.Assert(res.DeletedObjects[1], Equals, chnStr)
+}
+
+// TestDecodeListObjectsResult verifies that decodeListObjectsResult decodes
+// every encoded field of a ListObjects response: scalar markers, object
+// keys, and common prefixes.
+func (s *OssTypeSuite) TestDecodeListObjectsResult(c *C) {
+	var res ListObjectsResult
+	err := decodeListObjectsResult(&res)
+	c.Assert(err, IsNil)
+
+	res = ListObjectsResult{}
+	err = decodeListObjectsResult(&res)
+	c.Assert(err, IsNil)
+
+	res = ListObjectsResult{Prefix: goURLStr, Marker: goURLStr,
+		Delimiter: goURLStr, NextMarker: goURLStr,
+		Objects:        []ObjectProperties{{Key: chnURLStr}},
+		CommonPrefixes: []string{chnURLStr}}
+
+	err = decodeListObjectsResult(&res)
+	c.Assert(err, IsNil)
+
+	c.Assert(res.Prefix, Equals, goStr)
+	c.Assert(res.Marker, Equals, goStr)
+	c.Assert(res.Delimiter, Equals, goStr)
+	c.Assert(res.NextMarker, Equals, goStr)
+	c.Assert(res.Objects[0].Key, Equals, chnStr)
+	c.Assert(res.CommonPrefixes[0], Equals, chnStr)
+}
+
+// TestDecodeListMultipartUploadResult verifies that
+// decodeListMultipartUploadResult decodes the scalar markers and upload keys.
+func (s *OssTypeSuite) TestDecodeListMultipartUploadResult(c *C) {
+	res := ListMultipartUploadResult{}
+	err := decodeListMultipartUploadResult(&res)
+	c.Assert(err, IsNil)
+
+	res = ListMultipartUploadResult{Prefix: goURLStr, KeyMarker: goURLStr,
+		Delimiter: goURLStr, NextKeyMarker: goURLStr,
+		Uploads: []UncompletedUpload{{Key: chnURLStr}}}
+
+	err = decodeListMultipartUploadResult(&res)
+	c.Assert(err, IsNil)
+
+	c.Assert(res.Prefix, Equals, goStr)
+	c.Assert(res.KeyMarker, Equals, goStr)
+	c.Assert(res.Delimiter, Equals, goStr)
+	c.Assert(res.NextKeyMarker, Equals, goStr)
+	c.Assert(res.Uploads[0].Key, Equals, chnStr)
+}
+
+// TestSortUploadPart verifies that uploadParts sorts by ascending PartNumber
+// and is a no-op on an empty slice.
+func (s *OssTypeSuite) TestSortUploadPart(c *C) {
+	parts := []UploadPart{}
+
+	sort.Sort(uploadParts(parts))
+	c.Assert(len(parts), Equals, 0)
+
+	parts = []UploadPart{
+		{PartNumber: 5, ETag: "E5"},
+		{PartNumber: 1, ETag: "E1"},
+		{PartNumber: 4, ETag: "E4"},
+		{PartNumber: 2, ETag: "E2"},
+		{PartNumber: 3, ETag: "E3"},
+	}
+
+	sort.Sort(uploadParts(parts))
+
+	c.Assert(parts[0].PartNumber, Equals, 1)
+	c.Assert(parts[0].ETag, Equals, "E1")
+	c.Assert(parts[1].PartNumber, Equals, 2)
+	c.Assert(parts[1].ETag, Equals, "E2")
+	c.Assert(parts[2].PartNumber, Equals, 3)
+	c.Assert(parts[2].ETag, Equals, "E3")
+	c.Assert(parts[3].PartNumber, Equals, 4)
+	c.Assert(parts[3].ETag, Equals, "E4")
+	c.Assert(parts[4].PartNumber, Equals, 5)
+	c.Assert(parts[4].ETag, Equals, "E5")
+}

+ 438 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go

@@ -0,0 +1,438 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"time"
+)
+
+//
+// UploadFile uploads a local file as an object using multipart upload.
+//
+// objectKey  object key.
+// filePath   local file to upload.
+// partSize   size of each part in bytes, e.g. 100 * 1024 for 100KB parts.
+// options    object attributes to set on upload; see InitiateMultipartUpload.
+//
+// error is nil on success, otherwise the failure reason.
+//
+func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range (1024KB, 5GB]")
+	}
+
+	// Checkpoint configuration decides whether the upload is resumable.
+	cpConf, err := getCpConfig(options, filePath)
+	if err != nil {
+		return err
+	}
+
+	routines := getRoutines(options)
+
+	if cpConf.IsEnable {
+		return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
+	}
+
+	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
+}
+
+// ----- concurrent upload without checkpoint -----
+
+// getCpConfig reads the checkpoint configuration from the options. When
+// checkpointing is enabled but no file path is given, it defaults to
+// filePath + ".cp".
+func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
+	cpc := cpConfig{}
+	cpStr, err := findOption(options, checkpointConfig, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if cpStr != "" {
+		if err = json.Unmarshal([]byte(cpStr), &cpc); err != nil {
+			return nil, err
+		}
+	}
+
+	if cpc.IsEnable && cpc.FilePath == "" {
+		cpc.FilePath = filePath + ".cp"
+	}
+
+	return &cpc, nil
+}
+
+// getRoutines reads the worker-count option. It defaults to 1 when the
+// option is absent or unparseable, and clamps the value to [1, 100].
+func getRoutines(options []Option) int {
+	raw, err := findOption(options, routineNum, "")
+	if err != nil || raw == "" {
+		return 1
+	}
+
+	n, err := strconv.Atoi(raw)
+	if err != nil {
+		return 1
+	}
+
+	switch {
+	case n < 1:
+		return 1
+	case n > 100:
+		return 100
+	}
+	return n
+}
+
+// uploadPartHook is a test seam invoked before each part upload.
+type uploadPartHook func(id int, chunk FileChunk) error
+
+// uploadPartHooker is the active hook; tests replace it to inject failures.
+var uploadPartHooker uploadPartHook = defaultUploadPart
+
+// defaultUploadPart is the production hook: a no-op.
+func defaultUploadPart(id int, chunk FileChunk) error {
+	return nil
+}
+
+// workerArg bundles the shared, read-only state passed to each worker goroutine.
+type workerArg struct {
+	bucket   *Bucket                       // target bucket
+	filePath string                        // local file being uploaded
+	imur     InitiateMultipartUploadResult // upload session
+	hook     uploadPartHook                // pre-upload hook (test seam)
+}
+
+// worker is an upload goroutine. It consumes chunks from jobs, uploads each
+// one as a part, and publishes the outcome. When die is closed (another
+// worker has failed) it exits without publishing further results.
+func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+	for chunk := range jobs {
+		if err := arg.hook(id, chunk); err != nil {
+			// Select on die so this send cannot block forever once the
+			// coordinator has stopped reading from failed (it returns
+			// after the first failure), which would leak this goroutine.
+			select {
+			case failed <- err:
+			case <-die:
+			}
+			return
+		}
+		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
+		if err != nil {
+			select {
+			case failed <- err:
+			case <-die:
+			}
+			return
+		}
+		select {
+		case <-die:
+			return
+		default:
+		}
+		results <- part
+	}
+}
+
+// scheduler feeds every chunk to the workers, then closes jobs so the
+// workers' range loops terminate.
+func scheduler(jobs chan FileChunk, chunks []FileChunk) {
+	for _, chunk := range chunks {
+		jobs <- chunk
+	}
+	close(jobs)
+}
+
+// uploadFile performs a concurrent multipart upload without checkpoint
+// (resume) support. It splits the file into chunks, uploads them with
+// `routines` worker goroutines, and commits the upload. On any part failure
+// it aborts the multipart upload (best-effort) and returns the error.
+func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
+	chunks, err := SplitFileByPartSize(filePath, partSize)
+	if err != nil {
+		return err
+	}
+
+	// Initiate the multipart upload session.
+	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	// Start the worker goroutines.
+	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Feed the chunks to the workers.
+	go scheduler(jobs, chunks)
+
+	// Wait until every part has been uploaded; stop on the first failure.
+	completed := 0
+	parts := make([]UploadPart, len(chunks))
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			parts[part.PartNumber-1] = part
+		case err := <-failed:
+			close(die)
+			// Best-effort cleanup of the partial upload.
+			bucket.AbortMultipartUpload(imur)
+			return err
+		}
+	}
+
+	// Commit the upload.
+	if _, err = bucket.CompleteMultipartUpload(imur, parts); err != nil {
+		bucket.AbortMultipartUpload(imur)
+		return err
+	}
+	return nil
+}
+
+// ----- concurrent upload with checkpoint (resume) support -----
+const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
+
+// uploadCheckpoint is the on-disk state of a resumable upload.
+type uploadCheckpoint struct {
+	Magic     string   // magic marker identifying a checkpoint file
+	MD5       string   // MD5 of the checkpoint content (with this field cleared)
+	FilePath  string   // local file being uploaded
+	FileStat  cpStat   // local file state when the checkpoint was created
+	ObjectKey string   // destination object key
+	UploadID  string   // multipart upload ID
+	Parts     []cpPart // all parts of the local file
+}
+
+// cpStat captures the local file state used to detect modification.
+type cpStat struct {
+	Size         int64     // file size
+	LastModified time.Time // last modification time of the local file
+	MD5          string    // MD5 of the local file
+}
+
+// cpPart tracks one part's chunk, its upload result, and completion state.
+type cpPart struct {
+	Chunk       FileChunk  // chunk of the local file
+	Part        UploadPart // result of the completed part upload
+	IsCompleted bool       // whether this part has been uploaded
+}
+
+// isValid reports whether the checkpoint can be used to resume: its magic
+// and content MD5 must match, and the local file must be unchanged (same
+// size, modification time, and MD5) since the checkpoint was written.
+func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+	// Recompute the content MD5 with the MD5 field cleared and compare it
+	// (and the magic) against the stored values.
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	// Check whether the local file has been modified.
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return false, err
+	}
+	defer fd.Close()
+
+	st, err := fd.Stat()
+	if err != nil {
+		return false, err
+	}
+
+	md, err := calcFileMD5(filePath)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare file size / last modification time / file MD5.
+	// NOTE(review): calcFileMD5 currently always returns "", so the MD5
+	// comparison only guards against a checkpoint written with a non-empty MD5.
+	if cp.FileStat.Size != st.Size() ||
+		cp.FileStat.LastModified != st.ModTime() ||
+		cp.FileStat.MD5 != md {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// load reads and deserializes a checkpoint file into cp.
+func (cp *uploadCheckpoint) load(filePath string) error {
+	data, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, cp)
+}
+
+// dump serializes the checkpoint to filePath. The MD5 field stores the
+// base64-encoded MD5 of the JSON encoding taken with MD5 cleared, so that
+// isValid can verify the file's integrity on load.
+func (cp *uploadCheckpoint) dump(filePath string) error {
+	snapshot := *cp
+
+	// The checksum covers the JSON encoding with an empty MD5 field.
+	snapshot.MD5 = ""
+	payload, err := json.Marshal(snapshot)
+	if err != nil {
+		return err
+	}
+	digest := md5.Sum(payload)
+	snapshot.MD5 = base64.StdEncoding.EncodeToString(digest[:])
+
+	// Re-serialize with the checksum filled in.
+	payload, err = json.Marshal(snapshot)
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(filePath, payload, 0644)
+}
+
+// updatePart records the given part as completed in the checkpoint.
+func (cp *uploadCheckpoint) updatePart(part UploadPart) {
+	idx := part.PartNumber - 1
+	cp.Parts[idx].Part = part
+	cp.Parts[idx].IsCompleted = true
+}
+
+// todoParts returns the chunks that still need to be uploaded.
+func (cp *uploadCheckpoint) todoParts() []FileChunk {
+	pending := []FileChunk{}
+	for _, p := range cp.Parts {
+		if p.IsCompleted {
+			continue
+		}
+		pending = append(pending, p.Chunk)
+	}
+	return pending
+}
+
+// allParts returns the upload results of every part, in checkpoint order.
+func (cp *uploadCheckpoint) allParts() []UploadPart {
+	out := make([]UploadPart, 0, len(cp.Parts))
+	for _, p := range cp.Parts {
+		out = append(out, p.Part)
+	}
+	return out
+}
+
+// calcFileMD5 computes the MD5 of the local file.
+// NOTE(review): this is a stub that always returns ("", nil); the MD5-based
+// modification check in isValid is therefore effectively disabled.
+func calcFileMD5(filePath string) (string, error) {
+	return "", nil
+}
+
+// prepare initializes a fresh resumable upload: it fills the checkpoint with
+// the local file's state and chunk list, and initiates a new multipart
+// upload session whose ID is recorded in the checkpoint.
+func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
+	// Checkpoint identity.
+	cp.Magic = uploadCpMagic
+	cp.FilePath = filePath
+	cp.ObjectKey = objectKey
+
+	// Capture the local file's state for later modification detection.
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	st, err := fd.Stat()
+	if err != nil {
+		return err
+	}
+	cp.FileStat.Size = st.Size()
+	cp.FileStat.LastModified = st.ModTime()
+	md, err := calcFileMD5(filePath)
+	if err != nil {
+		return err
+	}
+	cp.FileStat.MD5 = md
+
+	// Split the file into chunks; all parts start as not completed.
+	parts, err := SplitFileByPartSize(filePath, partSize)
+	if err != nil {
+		return err
+	}
+
+	cp.Parts = make([]cpPart, len(parts))
+	for i, part := range parts {
+		cp.Parts[i].Chunk = part
+		cp.Parts[i].IsCompleted = false
+	}
+
+	// Initiate the multipart upload session.
+	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+	if err != nil {
+		return err
+	}
+	cp.UploadID = imur.UploadID
+
+	return nil
+}
+
+// complete commits the multipart upload and removes the checkpoint file.
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+	imur := InitiateMultipartUploadResult{
+		Bucket:   bucket.BucketName,
+		Key:      cp.ObjectKey,
+		UploadID: cp.UploadID,
+	}
+	if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
+		return err
+	}
+	// Checkpoint removal is best-effort; the upload itself has succeeded.
+	os.Remove(cpFilePath)
+	return nil
+}
+
+// uploadFileWithCp performs a concurrent multipart upload with checkpoint
+// (resume) support: it loads any existing checkpoint, re-initializes the
+// upload when the checkpoint is missing/invalid, uploads only the remaining
+// parts, persists progress after each part, and finally commits the upload.
+func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
+	// Load checkpoint data; a corrupt file is discarded.
+	ucp := uploadCheckpoint{}
+	err := ucp.load(cpFilePath)
+	if err != nil {
+		os.Remove(cpFilePath)
+	}
+
+	// On load error or stale data, start a fresh upload session.
+	valid, err := ucp.isValid(filePath)
+	if err != nil || !valid {
+		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
+			return err
+		}
+		os.Remove(cpFilePath)
+	}
+
+	// Only the parts not yet completed are uploaded.
+	chunks := ucp.todoParts()
+	imur := InitiateMultipartUploadResult{
+		Bucket:   bucket.BucketName,
+		Key:      objectKey,
+		UploadID: ucp.UploadID}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	// Start the worker goroutines.
+	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Feed the chunks to the workers.
+	go scheduler(jobs, chunks)
+
+	// Wait for the remaining parts, checkpointing after each success so a
+	// later run can resume; stop on the first failure.
+	completed := 0
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			ucp.updatePart(part)
+			ucp.dump(cpFilePath)
+		case err := <-failed:
+			close(die)
+			return err
+		}
+
+		if completed >= len(chunks) {
+			break
+		}
+	}
+
+	// Commit the upload and remove the checkpoint file.
+	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
+	return err
+}

+ 447 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/upload_test.go

@@ -0,0 +1,447 @@
+package oss
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"time"
+
+	. "gopkg.in/check.v1"
+)
+
+// OssUploadSuite groups the upload tests with their shared client/bucket fixtures.
+type OssUploadSuite struct {
+	client *Client
+	bucket *Bucket
+}
+
+var _ = Suite(&OssUploadSuite{})
+
+// Run once when the suite starts running
+func (s *OssUploadSuite) SetUpSuite(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+	s.client = client
+
+	// Best effort: the bucket may already exist, so the error is ignored.
+	s.client.CreateBucket(bucketName)
+	time.Sleep(5 * time.Second)
+
+	bucket, err := s.client.Bucket(bucketName)
+	c.Assert(err, IsNil)
+	s.bucket = bucket
+
+	testLogger.Println("test upload started")
+}
+
+// Run once after all tests or benchmarks have finished running
+func (s *OssUploadSuite) TearDownSuite(c *C) {
+	// Abort any in-flight multipart uploads.
+	lmur, err := s.bucket.ListMultipartUploads()
+	c.Assert(err, IsNil)
+
+	for _, upload := range lmur.Uploads {
+		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+			Key: upload.Key, UploadID: upload.UploadID}
+		err = s.bucket.AbortMultipartUpload(imur)
+		c.Assert(err, IsNil)
+	}
+
+	// Delete all remaining objects.
+	lor, err := s.bucket.ListObjects()
+	c.Assert(err, IsNil)
+
+	for _, object := range lor.Objects {
+		err = s.bucket.DeleteObject(object.Key)
+		c.Assert(err, IsNil)
+	}
+
+	testLogger.Println("test upload completed")
+}
+
+// Run before each test or benchmark starts running
+func (s *OssUploadSuite) SetUpTest(c *C) {
+	err := removeTempFiles("../oss", ".jpg")
+	c.Assert(err, IsNil)
+}
+
+// Run after each test or benchmark runs
+func (s *OssUploadSuite) TearDownTest(c *C) {
+	err := removeTempFiles("../oss", ".jpg")
+	c.Assert(err, IsNil)
+}
+
+// TestUploadRoutineWithoutRecovery tests concurrent upload without checkpoint recovery.
+func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
+	objectName := objectNamePrefix + "turwr"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "upload-new-file.jpg"
+
+	// 不指定Routines,默认单线程
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 指定线程数1
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(1))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 指定线程数3,小于分片数5
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 指定线程数5,等于分片数
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(5))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 指定线程数10,大于分片数5
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 线程值无效自动变成1
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(0))
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 线程值无效自动变成1
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(-1))
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// option
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Meta("myprop", "mypropval"))
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// ErrorHooker is an UploadPart request hook that fails part number 5
+// with error "ErrorHooker", after a short delay.
+func ErrorHooker(id int, chunk FileChunk) error {
+	if chunk.Number == 5 {
+		time.Sleep(time.Second)
+		return fmt.Errorf("ErrorHooker")
+	}
+	return nil
+}
+
+// TestUploadRoutineWithoutRecoveryNegative covers failure cases of the
+// concurrent upload without checkpoint recovery.
+func (s *OssUploadSuite) TestUploadRoutineWithoutRecoveryNegative(c *C) {
+	objectName := objectNamePrefix + "turwrn"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+
+	uploadPartHooker = ErrorHooker
+	// A worker goroutine fails (the hook errors on part 5).
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(2))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	uploadPartHooker = defaultUploadPart
+
+	// The local file does not exist.
+	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2))
+	c.Assert(err, NotNil)
+
+	// The specified part size is invalid (too small / too large).
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Routines(2))
+	c.Assert(err, NotNil)
+}
+
+// TestUploadRoutineWithRecovery tests concurrent upload with checkpoint recovery.
+func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
+	objectName := objectNamePrefix + "turtr"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := "upload-new-file-2.jpg"
+
+	// Routines默认值,CP开启默认路径是fileName+.cp
+	// 第一次上传,上传4片
+	uploadPartHooker = ErrorHooker
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	uploadPartHooker = defaultUploadPart
+
+	// check cp
+	ucp := uploadCheckpoint{}
+	err = ucp.load(fileName + ".cp")
+	c.Assert(err, IsNil)
+	c.Assert(ucp.Magic, Equals, uploadCpMagic)
+	c.Assert(len(ucp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
+	c.Assert(ucp.FilePath, Equals, fileName)
+	c.Assert(ucp.FileStat.Size, Equals, int64(482048))
+	c.Assert(len(ucp.FileStat.LastModified.String()), Equals, len("2015-12-17 18:43:03 +0800 CST"))
+	c.Assert(ucp.FileStat.MD5, Equals, "")
+	c.Assert(ucp.ObjectKey, Equals, objectName)
+	c.Assert(len(ucp.UploadID), Equals, len("3F79722737D1469980DACEDCA325BB52"))
+	c.Assert(len(ucp.Parts), Equals, 5)
+	c.Assert(len(ucp.todoParts()), Equals, 1)
+	c.Assert(len(ucp.allParts()), Equals, 5)
+
+	// 第二次上传,完成剩余的一片
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	err = ucp.load(fileName + ".cp")
+	c.Assert(err, NotNil)
+
+	// Routines指定,CP指定
+	uploadPartHooker = ErrorHooker
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(2), Checkpoint(true, objectName+".cp"))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	uploadPartHooker = defaultUploadPart
+
+	// check cp
+	ucp = uploadCheckpoint{}
+	err = ucp.load(objectName + ".cp")
+	c.Assert(err, IsNil)
+	c.Assert(ucp.Magic, Equals, uploadCpMagic)
+	c.Assert(len(ucp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
+	c.Assert(ucp.FilePath, Equals, fileName)
+	c.Assert(ucp.FileStat.Size, Equals, int64(482048))
+	c.Assert(len(ucp.FileStat.LastModified.String()), Equals, len("2015-12-17 18:43:03 +0800 CST"))
+	c.Assert(ucp.FileStat.MD5, Equals, "")
+	c.Assert(ucp.ObjectKey, Equals, objectName)
+	c.Assert(len(ucp.UploadID), Equals, len("3F79722737D1469980DACEDCA325BB52"))
+	c.Assert(len(ucp.Parts), Equals, 5)
+	c.Assert(len(ucp.todoParts()), Equals, 1)
+	c.Assert(len(ucp.allParts()), Equals, 5)
+
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	err = ucp.load(objectName + ".cp")
+	c.Assert(err, NotNil)
+
+	// 一次完成上传,中间没有错误
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// 用多协程下载,中间没有错误
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10), Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	// option
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""), Meta("myprop", "mypropval"))
+
+	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err = compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// TestUploadRoutineWithRecoveryNegative covers failure cases of the
+// concurrent upload with checkpoint recovery.
+func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
+	objectName := objectNamePrefix + "turrn"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+
+	// The local file does not exist.
+	err := s.bucket.UploadFile(objectName, "NotExist", 100*1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2), Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	// The specified part size is invalid (too small / too large).
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2), Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+}
+
+// TestUploadLocalFileChange verifies that if the local file changes between
+// resumed uploads, all parts are re-uploaded from scratch.
+func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
+	objectName := objectNamePrefix + "tulfc"
+	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
+	localFile := "BingWallpaper-2015-11-07.jpg"
+	newFile := "upload-new-file-3.jpg"
+
+	os.Remove(localFile)
+	err := copyFile(fileName, localFile)
+	c.Assert(err, IsNil)
+
+	// First attempt: 4 parts upload, then the hook fails part 5.
+	uploadPartHooker = ErrorHooker
+	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	uploadPartHooker = defaultUploadPart
+
+	os.Remove(localFile)
+	err = copyFile(fileName, localFile)
+	c.Assert(err, IsNil)
+
+	// The file changed, so the second upload re-uploads every part.
+	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
+	err = s.bucket.GetObjectToFile(objectName, newFile)
+	c.Assert(err, IsNil)
+
+	eq, err := compareFiles(fileName, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	err = s.bucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+}
+
+// copyFile copies the file at src to dst, creating or truncating dst.
+func copyFile(src, dst string) error {
+	srcFile, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = io.Copy(dstFile, srcFile)
+	return err
+}

+ 165 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go

@@ -0,0 +1,165 @@
+package oss
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash/crc64"
+	"net/http"
+	"os"
+	"os/exec"
+	"runtime"
+	"time"
+)
+
+// userAgent is the HTTP User-Agent string, built once at init time from the
+// SDK version, OS name/release, machine architecture, and Go runtime version.
+var userAgent = func() string {
+	sys := getSysInfo()
+	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
+		sys.release, sys.machine, runtime.Version())
+}()
+
+// sysInfo describes the host the SDK is running on.
+type sysInfo struct {
+	name    string // OS name, e.g. Windows/Linux
+	release string // OS release, e.g. 2.6.32-220.23.2.ali1089.el5.x86_64
+	machine string // machine architecture, e.g. amd64/x86_64
+}
+
+// getSysInfo returns the OS name, release, and machine architecture,
+// preferring `uname` output and falling back to runtime.GOOS/GOARCH
+// (with "-" for the release) when `uname` is unavailable.
+func getSysInfo() sysInfo {
+	name := runtime.GOOS
+	release := "-"
+	machine := runtime.GOARCH
+	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
+		name = string(bytes.TrimSpace(out))
+	}
+	if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
+		release = string(bytes.TrimSpace(out))
+	}
+	if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
+		machine = string(bytes.TrimSpace(out))
+	}
+	return sysInfo{name: name, release: release, machine: machine}
+}
+
+// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
+func GetNowSec() int64 {
+	return time.Now().Unix()
+}
+
+// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC. The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64. Note that this
+// means the result of calling UnixNano on the zero Time is undefined.
+func GetNowNanoSec() int64 {
+	return time.Now().UnixNano()
+}
+
+// GetNowGMT returns the current time formatted like
+// "Mon, 02 Jan 2006 15:04:05 GMT", the time format used in HTTP headers.
+func GetNowGMT() string {
+	return time.Now().UTC().Format(http.TimeFormat)
+}
+
+// FileChunk describes one chunk of a local file.
+type FileChunk struct {
+	Number int   // chunk number (1-based)
+	Offset int64 // offset of the chunk within the file
+	Size   int64 // chunk size in bytes
+}
+
+// SplitFileByPartNum splits a big file into chunkNum parts of (nearly) equal
+// size; the last part absorbs the remainder. The returned FileChunk slice is
+// only valid when error is nil.
+func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
+	if chunkNum <= 0 || chunkNum > 10000 {
+		// NOTE(review): this message lacks the "oss: " prefix used below — confirm intended.
+		return nil, errors.New("chunkNum invalid")
+	}
+
+	file, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	// Cannot have more chunks than there are bytes in the file.
+	if int64(chunkNum) > stat.Size() {
+		return nil, errors.New("oss: chunkNum invalid")
+	}
+
+	var chunks []FileChunk
+	var chunk = FileChunk{}
+	var chunkN = (int64)(chunkNum)
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * (stat.Size() / chunkN)
+		if i == chunkN-1 {
+			// Last chunk picks up the division remainder.
+			chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
+		} else {
+			chunk.Size = stat.Size() / chunkN
+		}
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into parts of chunkSize bytes, plus a
+// final smaller part for any remainder. The returned FileChunk slice is only
+// valid when error is nil.
+func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
+	if chunkSize <= 0 {
+		return nil, errors.New("chunkSize invalid")
+	}
+
+	file, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	var chunkN = stat.Size() / chunkSize
+	if chunkN >= 10000 {
+		// NOTE(review): golint prefers lowercase, unpunctuated error strings.
+		return nil, errors.New("Too many parts, please increase part size.")
+	}
+
+	var chunks []FileChunk
+	var chunk = FileChunk{}
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * chunkSize
+		chunk.Size = chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	// Trailing partial chunk for the remainder, if any.
+	if stat.Size()%chunkSize > 0 {
+		chunk.Number = len(chunks) + 1
+		chunk.Offset = int64(len(chunks)) * chunkSize
+		chunk.Size = stat.Size() % chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// GetPartEnd returns the inclusive end offset of a part that starts at begin
+// and spans up to per bytes, clamped so it never passes total.
+func GetPartEnd(begin int64, total int64, per int64) int64 {
+	if begin+per > total {
+		return total - 1
+	}
+	return begin + per - 1
+}
+
+// crcTable is a function returning the crc64 Table for the ECMA polynomial.
+// NOTE(review): this is a func value, not the table itself — callers must
+// invoke crcTable(). Confirm this is intended (cf. userAgent above, which is
+// immediately invoked with ()).
+var crcTable = func() *crc64.Table {
+	return crc64.MakeTable(crc64.ECMA)
+}

+ 105 - 0
go/gopath/src/github.com/aliyun/aliyun-oss-go-sdk/oss/utils_test.go

@@ -0,0 +1,105 @@
+package oss
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+// OssUtilsSuite groups the utils tests.
+type OssUtilsSuite struct{}
+
+var _ = Suite(&OssUtilsSuite{})
+
+// TestUtilsTime sanity-checks the time helpers GetNowSec/GetNowNanoSec/GetNowGMT.
+func (s *OssUtilsSuite) TestUtilsTime(c *C) {
+	c.Assert(GetNowSec() > 1448597674, Equals, true)
+	c.Assert(GetNowNanoSec() > 1448597674000000000, Equals, true)
+	c.Assert(len(GetNowGMT()), Equals, len("Fri, 27 Nov 2015 04:14:34 GMT"))
+}
+
+// TestUtilsSplitFile checks SplitFileByPartNum and SplitFileByPartSize on a
+// 482048-byte sample file, including invalid-argument cases.
+func (s *OssUtilsSuite) TestUtilsSplitFile(c *C) {
+	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
+
+	// Split by part count.
+	parts, err := SplitFileByPartNum(localFile, 4)
+	c.Assert(err, IsNil)
+	c.Assert(len(parts), Equals, 4)
+	testLogger.Println("parts 4:", parts)
+	for i, part := range parts {
+		c.Assert(part.Number, Equals, i+1)
+		c.Assert(part.Offset, Equals, int64(i*120512))
+		c.Assert(part.Size, Equals, int64(120512))
+	}
+
+	parts, err = SplitFileByPartNum(localFile, 5)
+	c.Assert(err, IsNil)
+	c.Assert(len(parts), Equals, 5)
+	testLogger.Println("parts 5:", parts)
+	for i, part := range parts {
+		c.Assert(part.Number, Equals, i+1)
+		c.Assert(part.Offset, Equals, int64(i*96409))
+	}
+
+	_, err = SplitFileByPartNum(localFile, 10001)
+	c.Assert(err, NotNil)
+
+	_, err = SplitFileByPartNum(localFile, 0)
+	c.Assert(err, NotNil)
+
+	_, err = SplitFileByPartNum(localFile, -1)
+	c.Assert(err, NotNil)
+
+	_, err = SplitFileByPartNum("notexist", 1024)
+	c.Assert(err, NotNil)
+
+	// Split by part size.
+	parts, err = SplitFileByPartSize(localFile, 120512)
+	c.Assert(err, IsNil)
+	c.Assert(len(parts), Equals, 4)
+	testLogger.Println("parts 4:", parts)
+	for i, part := range parts {
+		c.Assert(part.Number, Equals, i+1)
+		c.Assert(part.Offset, Equals, int64(i*120512))
+		c.Assert(part.Size, Equals, int64(120512))
+	}
+
+	parts, err = SplitFileByPartSize(localFile, 96409)
+	c.Assert(err, IsNil)
+	c.Assert(len(parts), Equals, 6)
+	testLogger.Println("parts 6:", parts)
+	for i, part := range parts {
+		c.Assert(part.Number, Equals, i+1)
+		c.Assert(part.Offset, Equals, int64(i*96409))
+	}
+
+	_, err = SplitFileByPartSize(localFile, 0)
+	c.Assert(err, NotNil)
+
+	_, err = SplitFileByPartSize(localFile, -1)
+	c.Assert(err, NotNil)
+
+	_, err = SplitFileByPartSize(localFile, 10)
+	c.Assert(err, NotNil)
+
+	_, err = SplitFileByPartSize("noexist", 120512)
+	c.Assert(err, NotNil)
+}
+
+// TestUtilsFileExt checks MIME type detection by file extension, including
+// paths with forward and backward slashes.
+func (s *OssUtilsSuite) TestUtilsFileExt(c *C) {
+	c.Assert(TypeByExtension("test.txt"), Equals, "text/plain; charset=utf-8")
+	c.Assert(TypeByExtension("test.jpg"), Equals, "image/jpeg")
+	c.Assert(TypeByExtension("test.pdf"), Equals, "application/pdf")
+	c.Assert(TypeByExtension("test"), Equals, "")
+	c.Assert(TypeByExtension("/root/dir/test.txt"), Equals, "text/plain; charset=utf-8")
+	c.Assert(TypeByExtension("root/dir/test.txt"), Equals, "text/plain; charset=utf-8")
+	c.Assert(TypeByExtension("root\\dir\\test.txt"), Equals, "text/plain; charset=utf-8")
+	c.Assert(TypeByExtension("D:\\work\\dir\\test.txt"), Equals, "text/plain; charset=utf-8")
+}
+
+// TestGetPartEnd checks end-offset computation, including the clamped last part.
+func (s *OssUtilsSuite) TestGetPartEnd(c *C) {
+	end := GetPartEnd(3, 10, 3)
+	c.Assert(end, Equals, int64(5))
+
+	end = GetPartEnd(9, 10, 3)
+	c.Assert(end, Equals, int64(9))
+
+	end = GetPartEnd(7, 10, 3)
+	c.Assert(end, Equals, int64(9))
+}

+ 5 - 0
go/gopath/src/github.com/astaxie/beego/.gitignore

@@ -0,0 +1,5 @@
+.idea
+.DS_Store
+*.swp
+*.swo
+beego.iml

+ 51 - 0
go/gopath/src/github.com/astaxie/beego/.travis.yml

@@ -0,0 +1,51 @@
+language: go
+
+go:
+  - tip
+  - 1.6.0
+  - 1.5.3
+  - 1.4.3
+services:
+  - redis-server
+  - mysql
+  - postgresql
+  - memcached
+env:
+  - ORM_DRIVER=sqlite3   ORM_SOURCE=$TRAVIS_BUILD_DIR/orm_test.db
+  - ORM_DRIVER=mysql    ORM_SOURCE="root:@/orm_test?charset=utf8"
+  - ORM_DRIVER=postgres ORM_SOURCE="user=postgres dbname=orm_test sslmode=disable"
+before_install:
+ - git clone git://github.com/ideawu/ssdb.git
+ - cd ssdb
+ - make
+ - cd ..
+install:
+  - go get github.com/lib/pq
+  - go get github.com/go-sql-driver/mysql
+  - go get github.com/mattn/go-sqlite3
+  - go get github.com/bradfitz/gomemcache/memcache
+  - go get github.com/garyburd/redigo/redis
+  - go get github.com/beego/x2j
+  - go get github.com/couchbase/go-couchbase
+  - go get github.com/beego/goyaml2
+  - go get github.com/belogik/goes
+  - go get github.com/siddontang/ledisdb/config
+  - go get github.com/siddontang/ledisdb/ledis
+  - go get golang.org/x/tools/cmd/vet
+  - go get github.com/golang/lint/golint
+  - go get github.com/ssdb/gossdb/ssdb
+before_script:
+  - sh -c "if [ '$ORM_DRIVER' = 'postgres' ]; then psql -c 'create database orm_test;' -U postgres; fi"
+  - sh -c "if [ '$ORM_DRIVER' = 'mysql' ]; then mysql -u root -e 'create database orm_test;'; fi"
+  - sh -c "if [ '$ORM_DRIVER' = 'sqlite' ]; then touch $TRAVIS_BUILD_DIR/orm_test.db; fi"
+  - mkdir -p res/var
+  - ./ssdb/ssdb-server ./ssdb/ssdb.conf -d
+after_script:
+  - killall -w ssdb-server
+  - rm -rf ./res/var/*
+script:
+  - go vet -x ./...
+  - $HOME/gopath/bin/golint ./...
+  - go test -v ./...
+notifications:
+  webhooks: https://hooks.pubu.im/services/z7m9bvybl3rgtg9

+ 52 - 0
go/gopath/src/github.com/astaxie/beego/CONTRIBUTING.md

@@ -0,0 +1,52 @@
+# Contributing to beego
+
+beego is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+Here are instructions to get you started. They are probably not perfect, 
+please let us know if anything feels wrong or incomplete.
+
+## Contribution guidelines
+
+### Pull requests
+
+First of all, beego follows gitflow. So please send your pull request 
+to the **develop** branch. We will close pull requests sent to the master branch.
+
+We are always happy to receive pull requests, and do our best to
+review them as fast as possible. Not sure if that typo is worth a pull
+request? Do it! We will appreciate it.
+
+If your pull request is not accepted on the first try, don't be
+discouraged! Sometimes we can make a mistake, please do more explaining 
+for us. We will appreciate it.
+
+We're trying very hard to keep beego simple and fast. We don't want it
+to do everything for everybody. This means that we might decide against
+incorporating a new feature. But we will give you some advice on how to 
+do it in another way.
+
+### Create issues
+
+Any significant improvement should be documented as [a GitHub
+issue](https://github.com/astaxie/beego/issues) before anybody
+starts working on it. 
+
+Also when filing an issue, make sure to answer these five questions:
+
+- What version of beego are you using (bee version)?
+- What operating system and processor architecture are you using?
+- What did you do?
+- What did you expect to see?
+- What did you see instead?
+
+### But check existing issues and docs first!
+
+Please take a moment to check that an issue doesn't already exist
+documenting your bug report or improvement proposal. If it does, it
+never hurts to add a quick "+1" or "I have this problem too". This will
+help prioritize the most common problems and requests.
+
+Also, if you don't know how to use it, please make sure you have read through
+the docs in http://beego.me/docs

+ 13 - 0
go/gopath/src/github.com/astaxie/beego/LICENSE

@@ -0,0 +1,13 @@
+Copyright 2014 astaxie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 61 - 0
go/gopath/src/github.com/astaxie/beego/README.md

@@ -0,0 +1,61 @@
+## Beego
+
+[![Build Status](https://travis-ci.org/astaxie/beego.svg?branch=master)](https://travis-ci.org/astaxie/beego)
+[![GoDoc](http://godoc.org/github.com/astaxie/beego?status.svg)](http://godoc.org/github.com/astaxie/beego)
+
+beego is used for rapid development of RESTful APIs, web apps and backend services in Go.
+It is inspired by Tornado, Sinatra and Flask. beego has some Go-specific features such as interfaces and struct embedding.
+
+More info [beego.me](http://beego.me)
+
+## Quick Start
+###### Download and install
+
+    go get github.com/astaxie/beego
+
+###### Create file `hello.go`
+```go
+package main
+
+import "github.com/astaxie/beego"
+
+func main(){
+    beego.Run()
+}
+```
+###### Build and run
+```bash
+    go build hello.go
+    ./hello
+```
+###### Congratulations!
+You just built your first beego app.
+Open your browser and visit `http://localhost:8000`.
+Please see [Documentation](http://beego.me/docs) for more.
+
+## Features
+
+* RESTful support
+* MVC architecture
+* Modularity
+* Auto API documents
+* Annotation router
+* Namespace
+* Powerful development tools
+* Full stack for Web & API
+
+## Documentation
+
+* [English](http://beego.me/docs/intro/)
+* [中文文档](http://beego.me/docs/intro/)
+* [Русский](http://beego.me/docs/intro/)
+
+## Community
+
+* [http://beego.me/community](http://beego.me/community)
+* Welcome to join us in Slack: [https://beego.slack.com](https://beego.slack.com), you can get invited from [here](https://github.com/beego/beedoc/issues/232)
+
+## LICENSE
+
+beego source code is licensed under the Apache Licence, Version 2.0
+(http://www.apache.org/licenses/LICENSE-2.0.html).

+ 424 - 0
go/gopath/src/github.com/astaxie/beego/admin.go

@@ -0,0 +1,424 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package beego
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"text/template"
+	"time"
+
+	"github.com/astaxie/beego/grace"
+	"github.com/astaxie/beego/toolbox"
+	"github.com/astaxie/beego/utils"
+)
+
+// BeeAdminApp is the default adminApp used by admin module.
+var beeAdminApp *adminApp
+
+// FilterMonitorFunc is the default monitor filter used when the admin module is enabled.
+// if this func returns true, the admin module records qps for this request by the condition of this function's logic.
+// usage:
+// 	func MyFilterMonitor(method, requestPath string, t time.Duration) bool {
+//	 	if method == "POST" {
+//			return false
+//	 	}
+//	 	if t.Nanoseconds() < 100 {
+//			return false
+//	 	}
+//	 	if strings.HasPrefix(requestPath, "/astaxie") {
+//			return false
+//	 	}
+//	 	return true
+// 	}
+// 	beego.FilterMonitorFunc = MyFilterMonitor.
+var FilterMonitorFunc func(string, string, time.Duration) bool
+
+func init() {
+	beeAdminApp = &adminApp{
+		routers: make(map[string]http.HandlerFunc),
+	}
+	beeAdminApp.Route("/", adminIndex)
+	beeAdminApp.Route("/qps", qpsIndex)
+	beeAdminApp.Route("/prof", profIndex)
+	beeAdminApp.Route("/healthcheck", healthcheck)
+	beeAdminApp.Route("/task", taskStatus)
+	beeAdminApp.Route("/listconf", listConf)
+	FilterMonitorFunc = func(string, string, time.Duration) bool { return true }
+}
+
+// AdminIndex is the default http.Handler for admin module.
+// it matches url pattern "/".
+func adminIndex(rw http.ResponseWriter, r *http.Request) {
+	execTpl(rw, map[interface{}]interface{}{}, indexTpl, defaultScriptsTpl)
+}
+
+// QpsIndex is the http.Handler for writing qps statistics map result info in http.ResponseWriter.
+// it's registered with url pattern "/qps" in admin module.
+func qpsIndex(rw http.ResponseWriter, r *http.Request) {
+	data := make(map[interface{}]interface{})
+	data["Content"] = toolbox.StatisticsMap.GetMap()
+	execTpl(rw, data, qpsTpl, defaultScriptsTpl)
+}
+
+// ListConf is the http.Handler of displaying all beego configuration values as key/value pair.
+// it's registered with url pattern "/listconf" in admin module.
+func listConf(rw http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	command := r.Form.Get("command")
+	if command == "" {
+		rw.Write([]byte("command not support"))
+		return
+	}
+
+	data := make(map[interface{}]interface{})
+	switch command {
+	case "conf":
+		m := make(map[string]interface{})
+		m["AppConfigPath"] = appConfigPath
+		m["AppConfigProvider"] = appConfigProvider
+		m["BConfig.AppName"] = BConfig.AppName
+		m["BConfig.RunMode"] = BConfig.RunMode
+		m["BConfig.RouterCaseSensitive"] = BConfig.RouterCaseSensitive
+		m["BConfig.ServerName"] = BConfig.ServerName
+		m["BConfig.RecoverPanic"] = BConfig.RecoverPanic
+		m["BConfig.CopyRequestBody"] = BConfig.CopyRequestBody
+		m["BConfig.EnableGzip"] = BConfig.EnableGzip
+		m["BConfig.MaxMemory"] = BConfig.MaxMemory
+		m["BConfig.EnableErrorsShow"] = BConfig.EnableErrorsShow
+		m["BConfig.Listen.Graceful"] = BConfig.Listen.Graceful
+		m["BConfig.Listen.ServerTimeOut"] = BConfig.Listen.ServerTimeOut
+		m["BConfig.Listen.ListenTCP4"] = BConfig.Listen.ListenTCP4
+		m["BConfig.Listen.EnableHTTP"] = BConfig.Listen.EnableHTTP
+		m["BConfig.Listen.HTTPAddr"] = BConfig.Listen.HTTPAddr
+		m["BConfig.Listen.HTTPPort"] = BConfig.Listen.HTTPPort
+		m["BConfig.Listen.EnableHTTPS"] = BConfig.Listen.EnableHTTPS
+		m["BConfig.Listen.HTTPSAddr"] = BConfig.Listen.HTTPSAddr
+		m["BConfig.Listen.HTTPSPort"] = BConfig.Listen.HTTPSPort
+		m["BConfig.Listen.HTTPSCertFile"] = BConfig.Listen.HTTPSCertFile
+		m["BConfig.Listen.HTTPSKeyFile"] = BConfig.Listen.HTTPSKeyFile
+		m["BConfig.Listen.EnableAdmin"] = BConfig.Listen.EnableAdmin
+		m["BConfig.Listen.AdminAddr"] = BConfig.Listen.AdminAddr
+		m["BConfig.Listen.AdminPort"] = BConfig.Listen.AdminPort
+		m["BConfig.Listen.EnableFcgi"] = BConfig.Listen.EnableFcgi
+		m["BConfig.Listen.EnableStdIo"] = BConfig.Listen.EnableStdIo
+		m["BConfig.WebConfig.AutoRender"] = BConfig.WebConfig.AutoRender
+		m["BConfig.WebConfig.EnableDocs"] = BConfig.WebConfig.EnableDocs
+		m["BConfig.WebConfig.FlashName"] = BConfig.WebConfig.FlashName
+		m["BConfig.WebConfig.FlashSeparator"] = BConfig.WebConfig.FlashSeparator
+		m["BConfig.WebConfig.DirectoryIndex"] = BConfig.WebConfig.DirectoryIndex
+		m["BConfig.WebConfig.StaticDir"] = BConfig.WebConfig.StaticDir
+		m["BConfig.WebConfig.StaticExtensionsToGzip"] = BConfig.WebConfig.StaticExtensionsToGzip
+		m["BConfig.WebConfig.TemplateLeft"] = BConfig.WebConfig.TemplateLeft
+		m["BConfig.WebConfig.TemplateRight"] = BConfig.WebConfig.TemplateRight
+		m["BConfig.WebConfig.ViewsPath"] = BConfig.WebConfig.ViewsPath
+		m["BConfig.WebConfig.EnableXSRF"] = BConfig.WebConfig.EnableXSRF
+		m["BConfig.WebConfig.XSRFKEY"] = BConfig.WebConfig.XSRFKey
+		m["BConfig.WebConfig.XSRFExpire"] = BConfig.WebConfig.XSRFExpire
+		m["BConfig.WebConfig.Session.SessionOn"] = BConfig.WebConfig.Session.SessionOn
+		m["BConfig.WebConfig.Session.SessionProvider"] = BConfig.WebConfig.Session.SessionProvider
+		m["BConfig.WebConfig.Session.SessionName"] = BConfig.WebConfig.Session.SessionName
+		m["BConfig.WebConfig.Session.SessionGCMaxLifetime"] = BConfig.WebConfig.Session.SessionGCMaxLifetime
+		m["BConfig.WebConfig.Session.SessionProviderConfig"] = BConfig.WebConfig.Session.SessionProviderConfig
+		m["BConfig.WebConfig.Session.SessionCookieLifeTime"] = BConfig.WebConfig.Session.SessionCookieLifeTime
+		m["BConfig.WebConfig.Session.SessionAutoSetCookie"] = BConfig.WebConfig.Session.SessionAutoSetCookie
+		m["BConfig.WebConfig.Session.SessionDomain"] = BConfig.WebConfig.Session.SessionDomain
+		m["BConfig.Log.AccessLogs"] = BConfig.Log.AccessLogs
+		m["BConfig.Log.FileLineNum"] = BConfig.Log.FileLineNum
+		m["BConfig.Log.Outputs"] = BConfig.Log.Outputs
+		tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl))
+		tmpl = template.Must(tmpl.Parse(configTpl))
+		tmpl = template.Must(tmpl.Parse(defaultScriptsTpl))
+
+		data["Content"] = m
+
+		tmpl.Execute(rw, data)
+
+	case "router":
+		var (
+			content = map[string]interface{}{
+				"Fields": []string{
+					"Router Pattern",
+					"Methods",
+					"Controller",
+				},
+			}
+			methods     = []string{}
+			methodsData = make(map[string]interface{})
+		)
+		for method, t := range BeeApp.Handlers.routers {
+
+			resultList := new([][]string)
+
+			printTree(resultList, t)
+
+			methods = append(methods, method)
+			methodsData[method] = resultList
+		}
+
+		content["Data"] = methodsData
+		content["Methods"] = methods
+		data["Content"] = content
+		data["Title"] = "Routers"
+		execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl)
+	case "filter":
+		var (
+			content = map[string]interface{}{
+				"Fields": []string{
+					"Router Pattern",
+					"Filter Function",
+				},
+			}
+			filterTypes    = []string{}
+			filterTypeData = make(map[string]interface{})
+		)
+
+		if BeeApp.Handlers.enableFilter {
+			var filterType string
+			for k, fr := range map[int]string{
+				BeforeStatic: "Before Static",
+				BeforeRouter: "Before Router",
+				BeforeExec:   "Before Exec",
+				AfterExec:    "After Exec",
+				FinishRouter: "Finish Router"} {
+				if bf, ok := BeeApp.Handlers.filters[k]; ok {
+					filterType = fr
+					filterTypes = append(filterTypes, filterType)
+					resultList := new([][]string)
+					for _, f := range bf {
+						var result = []string{
+							fmt.Sprintf("%s", f.pattern),
+							fmt.Sprintf("%s", utils.GetFuncName(f.filterFunc)),
+						}
+						*resultList = append(*resultList, result)
+					}
+					filterTypeData[filterType] = resultList
+				}
+			}
+		}
+
+		content["Data"] = filterTypeData
+		content["Methods"] = filterTypes
+
+		data["Content"] = content
+		data["Title"] = "Filters"
+		execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl)
+	default:
+		rw.Write([]byte("command not support"))
+	}
+}
+
+func printTree(resultList *[][]string, t *Tree) {
+	for _, tr := range t.fixrouters {
+		printTree(resultList, tr)
+	}
+	if t.wildcard != nil {
+		printTree(resultList, t.wildcard)
+	}
+	for _, l := range t.leaves {
+		if v, ok := l.runObject.(*controllerInfo); ok {
+			if v.routerType == routerTypeBeego {
+				var result = []string{
+					v.pattern,
+					fmt.Sprintf("%s", v.methods),
+					fmt.Sprintf("%s", v.controllerType),
+				}
+				*resultList = append(*resultList, result)
+			} else if v.routerType == routerTypeRESTFul {
+				var result = []string{
+					v.pattern,
+					fmt.Sprintf("%s", v.methods),
+					"",
+				}
+				*resultList = append(*resultList, result)
+			} else if v.routerType == routerTypeHandler {
+				var result = []string{
+					v.pattern,
+					"",
+					"",
+				}
+				*resultList = append(*resultList, result)
+			}
+		}
+	}
+}
+
+// ProfIndex is a http.Handler for showing profile command.
+// it's in url pattern "/prof" in admin module.
+func profIndex(rw http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	command := r.Form.Get("command")
+	if command == "" {
+		return
+	}
+
+	var (
+		format = r.Form.Get("format")
+		data   = make(map[interface{}]interface{})
+		result bytes.Buffer
+	)
+	toolbox.ProcessInput(command, &result)
+	data["Content"] = result.String()
+
+	if format == "json" && command == "gc summary" {
+		dataJSON, err := json.Marshal(data)
+		if err != nil {
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		rw.Header().Set("Content-Type", "application/json")
+		rw.Write(dataJSON)
+		return
+	}
+
+	data["Title"] = command
+	defaultTpl := defaultScriptsTpl
+	if command == "gc summary" {
+		defaultTpl = gcAjaxTpl
+	}
+	execTpl(rw, data, profillingTpl, defaultTpl)
+}
+
+// Healthcheck is a http.Handler calling health checking and showing the result.
+// it's in "/healthcheck" pattern in admin module.
+func healthcheck(rw http.ResponseWriter, req *http.Request) {
+	var (
+		data       = make(map[interface{}]interface{})
+		result     = []string{}
+		resultList = new([][]string)
+		content    = map[string]interface{}{
+			"Fields": []string{"Name", "Message", "Status"},
+		}
+	)
+
+	for name, h := range toolbox.AdminCheckList {
+		if err := h.Check(); err != nil {
+			result = []string{
+				fmt.Sprintf("error"),
+				fmt.Sprintf("%s", name),
+				fmt.Sprintf("%s", err.Error()),
+			}
+
+		} else {
+			result = []string{
+				fmt.Sprintf("success"),
+				fmt.Sprintf("%s", name),
+				fmt.Sprintf("OK"),
+			}
+
+		}
+		*resultList = append(*resultList, result)
+	}
+	content["Data"] = resultList
+	data["Content"] = content
+	data["Title"] = "Health Check"
+	execTpl(rw, data, healthCheckTpl, defaultScriptsTpl)
+}
+
+// TaskStatus is a http.Handler with running task status (task name, status and the last execution).
+// it's in "/task" pattern in admin module.
+func taskStatus(rw http.ResponseWriter, req *http.Request) {
+	data := make(map[interface{}]interface{})
+
+	// Run Task
+	req.ParseForm()
+	taskname := req.Form.Get("taskname")
+	if taskname != "" {
+		if t, ok := toolbox.AdminTaskList[taskname]; ok {
+			if err := t.Run(); err != nil {
+				data["Message"] = []string{"error", fmt.Sprintf("%s", err)}
+			}
+			data["Message"] = []string{"success", fmt.Sprintf("%s run success,Now the Status is <br>%s", taskname, t.GetStatus())}
+		} else {
+			data["Message"] = []string{"warning", fmt.Sprintf("there's no task which named: %s", taskname)}
+		}
+	}
+
+	// List Tasks
+	content := make(map[string]interface{})
+	resultList := new([][]string)
+	var result = []string{}
+	var fields = []string{
+		"Task Name",
+		"Task Spec",
+		"Task Status",
+		"Last Time",
+		"",
+	}
+	for tname, tk := range toolbox.AdminTaskList {
+		result = []string{
+			tname,
+			fmt.Sprintf("%s", tk.GetSpec()),
+			fmt.Sprintf("%s", tk.GetStatus()),
+			tk.GetPrev().String(),
+		}
+		*resultList = append(*resultList, result)
+	}
+
+	content["Fields"] = fields
+	content["Data"] = resultList
+	data["Content"] = content
+	data["Title"] = "Tasks"
+	execTpl(rw, data, tasksTpl, defaultScriptsTpl)
+}
+
+func execTpl(rw http.ResponseWriter, data map[interface{}]interface{}, tpls ...string) {
+	tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl))
+	for _, tpl := range tpls {
+		tmpl = template.Must(tmpl.Parse(tpl))
+	}
+	tmpl.Execute(rw, data)
+}
+
+// adminApp is an http.HandlerFunc map used as beeAdminApp.
+type adminApp struct {
+	routers map[string]http.HandlerFunc
+}
+
+// Route adds http.HandlerFunc to adminApp with url pattern.
+func (admin *adminApp) Route(pattern string, f http.HandlerFunc) {
+	admin.routers[pattern] = f
+}
+
+// Run adminApp http server.
+// Its addr is defined in configuration file as adminhttpaddr and adminhttpport.
+func (admin *adminApp) Run() {
+	if len(toolbox.AdminTaskList) > 0 {
+		toolbox.StartTask()
+	}
+	addr := BConfig.Listen.AdminAddr
+
+	if BConfig.Listen.AdminPort != 0 {
+		addr = fmt.Sprintf("%s:%d", BConfig.Listen.AdminAddr, BConfig.Listen.AdminPort)
+	}
+	for p, f := range admin.routers {
+		http.Handle(p, f)
+	}
+	BeeLogger.Info("Admin server Running on %s", addr)
+
+	var err error
+	if BConfig.Listen.Graceful {
+		err = grace.ListenAndServe(addr, nil)
+	} else {
+		err = http.ListenAndServe(addr, nil)
+	}
+	if err != nil {
+		BeeLogger.Critical("Admin ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid()))
+	}
+}

Разница между файлами не показана из-за своего большого размера
+ 355 - 0
go/gopath/src/github.com/astaxie/beego/adminui.go


+ 362 - 0
go/gopath/src/github.com/astaxie/beego/app.go

@@ -0,0 +1,362 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package beego
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/fcgi"
+	"os"
+	"path"
+	"time"
+
+	"github.com/astaxie/beego/grace"
+	"github.com/astaxie/beego/utils"
+)
+
+var (
+	// BeeApp is an application instance
+	BeeApp *App
+)
+
+func init() {
+	// create beego application
+	BeeApp = NewApp()
+}
+
+// App defines beego application with a new PatternServeMux.
+type App struct {
+	Handlers *ControllerRegister
+	Server   *http.Server
+}
+
+// NewApp returns a new beego application.
+func NewApp() *App {
+	cr := NewControllerRegister()
+	app := &App{Handlers: cr, Server: &http.Server{}}
+	return app
+}
+
+// Run beego application.
+func (app *App) Run() {
+	addr := BConfig.Listen.HTTPAddr
+
+	if BConfig.Listen.HTTPPort != 0 {
+		addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPAddr, BConfig.Listen.HTTPPort)
+	}
+
+	var (
+		err        error
+		l          net.Listener
+		endRunning = make(chan bool, 1)
+	)
+
+	// run cgi server
+	if BConfig.Listen.EnableFcgi {
+		if BConfig.Listen.EnableStdIo {
+			if err = fcgi.Serve(nil, app.Handlers); err == nil { // standard I/O
+				BeeLogger.Info("Use FCGI via standard I/O")
+			} else {
+				BeeLogger.Critical("Cannot use FCGI via standard I/O", err)
+			}
+			return
+		}
+		if BConfig.Listen.HTTPPort == 0 {
+			// remove the Socket file before start
+			if utils.FileExists(addr) {
+				os.Remove(addr)
+			}
+			l, err = net.Listen("unix", addr)
+		} else {
+			l, err = net.Listen("tcp", addr)
+		}
+		if err != nil {
+			BeeLogger.Critical("Listen: ", err)
+		}
+		if err = fcgi.Serve(l, app.Handlers); err != nil {
+			BeeLogger.Critical("fcgi.Serve: ", err)
+		}
+		return
+	}
+
+	app.Server.Handler = app.Handlers
+	app.Server.ReadTimeout = time.Duration(BConfig.Listen.ServerTimeOut) * time.Second
+	app.Server.WriteTimeout = time.Duration(BConfig.Listen.ServerTimeOut) * time.Second
+
+	// run graceful mode
+	if BConfig.Listen.Graceful {
+		httpsAddr := BConfig.Listen.HTTPSAddr
+		app.Server.Addr = httpsAddr
+		if BConfig.Listen.EnableHTTPS {
+			go func() {
+				time.Sleep(20 * time.Microsecond)
+				if BConfig.Listen.HTTPSPort != 0 {
+					httpsAddr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort)
+					app.Server.Addr = httpsAddr
+				}
+				server := grace.NewServer(httpsAddr, app.Handlers)
+				server.Server.ReadTimeout = app.Server.ReadTimeout
+				server.Server.WriteTimeout = app.Server.WriteTimeout
+				if err := server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil {
+					BeeLogger.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid()))
+					time.Sleep(100 * time.Microsecond)
+					endRunning <- true
+				}
+			}()
+		}
+		if BConfig.Listen.EnableHTTP {
+			go func() {
+				server := grace.NewServer(addr, app.Handlers)
+				server.Server.ReadTimeout = app.Server.ReadTimeout
+				server.Server.WriteTimeout = app.Server.WriteTimeout
+				if BConfig.Listen.ListenTCP4 {
+					server.Network = "tcp4"
+				}
+				if err := server.ListenAndServe(); err != nil {
+					BeeLogger.Critical("ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid()))
+					time.Sleep(100 * time.Microsecond)
+					endRunning <- true
+				}
+			}()
+		}
+		<-endRunning
+		return
+	}
+
+	// run normal mode
+	app.Server.Addr = addr
+	if BConfig.Listen.EnableHTTPS {
+		go func() {
+			time.Sleep(20 * time.Microsecond)
+			if BConfig.Listen.HTTPSPort != 0 {
+				app.Server.Addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort)
+			}
+			BeeLogger.Info("https server Running on %s", app.Server.Addr)
+			if err := app.Server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil {
+				BeeLogger.Critical("ListenAndServeTLS: ", err)
+				time.Sleep(100 * time.Microsecond)
+				endRunning <- true
+			}
+		}()
+	}
+	if BConfig.Listen.EnableHTTP {
+		go func() {
+			app.Server.Addr = addr
+			BeeLogger.Info("http server Running on %s", app.Server.Addr)
+			if BConfig.Listen.ListenTCP4 {
+				ln, err := net.Listen("tcp4", app.Server.Addr)
+				if err != nil {
+					BeeLogger.Critical("ListenAndServe: ", err)
+					time.Sleep(100 * time.Microsecond)
+					endRunning <- true
+					return
+				}
+				if err = app.Server.Serve(ln); err != nil {
+					BeeLogger.Critical("ListenAndServe: ", err)
+					time.Sleep(100 * time.Microsecond)
+					endRunning <- true
+					return
+				}
+			} else {
+				if err := app.Server.ListenAndServe(); err != nil {
+					BeeLogger.Critical("ListenAndServe: ", err)
+					time.Sleep(100 * time.Microsecond)
+					endRunning <- true
+				}
+			}
+		}()
+	}
+	<-endRunning
+}
+
+// Router adds a patterned controller handler to BeeApp.
+// it's an alias method of App.Router.
+// usage:
+//  simple router
+//  beego.Router("/admin", &admin.UserController{})
+//  beego.Router("/admin/index", &admin.ArticleController{})
+//
+//  regex router
+//
+//  beego.Router("/api/:id([0-9]+)", &controllers.RController{})
+//
+//  custom rules
+//  beego.Router("/api/list",&RestController{},"*:ListFood")
+//  beego.Router("/api/create",&RestController{},"post:CreateFood")
+//  beego.Router("/api/update",&RestController{},"put:UpdateFood")
+//  beego.Router("/api/delete",&RestController{},"delete:DeleteFood")
+func Router(rootpath string, c ControllerInterface, mappingMethods ...string) *App {
+	BeeApp.Handlers.Add(rootpath, c, mappingMethods...)
+	return BeeApp
+}
+
+// Include will generate router file in the router/xxx.go from the controller's comments
+// usage:
+// beego.Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{})
+// type BankAccount struct{
+//   beego.Controller
+// }
+//
+// register the function
+// func (b *BankAccount)Mapping(){
+//  b.Mapping("ShowAccount" , b.ShowAccount)
+//  b.Mapping("ModifyAccount", b.ModifyAccount)
+//}
+//
+// //@router /account/:id  [get]
+// func (b *BankAccount) ShowAccount(){
+//    //logic
+// }
+//
+//
+// //@router /account/:id  [post]
+// func (b *BankAccount) ModifyAccount(){
+//    //logic
+// }
+//
+// the comments @router url methodlist
+// url support all the function Router's pattern
+// methodlist [get post head put delete options *]
+func Include(cList ...ControllerInterface) *App {
+	BeeApp.Handlers.Include(cList...)
+	return BeeApp
+}
+
+// RESTRouter adds a restful controller handler to BeeApp.
+// its' controller implements beego.ControllerInterface and
+// defines a param "pattern/:objectId" to visit each resource.
+func RESTRouter(rootpath string, c ControllerInterface) *App {
+	Router(rootpath, c)
+	Router(path.Join(rootpath, ":objectId"), c)
+	return BeeApp
+}
+
+// AutoRouter adds defined controller handler to BeeApp.
+// it's same to App.AutoRouter.
+// if beego.AddAuto(&MainContorlller{}) and MainController has methods List and Page,
+// visit the url /main/list to exec List function or /main/page to exec Page function.
+func AutoRouter(c ControllerInterface) *App {
+	BeeApp.Handlers.AddAuto(c)
+	return BeeApp
+}
+
+// AutoPrefix adds controller handler to BeeApp with prefix.
+// it's same to App.AutoRouterWithPrefix.
+// if beego.AutoPrefix("/admin",&MainContorlller{}) and MainController has methods List and Page,
+// visit the url /admin/main/list to exec List function or /admin/main/page to exec Page function.
+func AutoPrefix(prefix string, c ControllerInterface) *App {
+	BeeApp.Handlers.AddAutoPrefix(prefix, c)
+	return BeeApp
+}
+
+// Get used to register router for Get method
+// usage:
+//    beego.Get("/", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Get(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Get(rootpath, f)
+	return BeeApp
+}
+
+// Post used to register router for Post method
+// usage:
+//    beego.Post("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Post(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Post(rootpath, f)
+	return BeeApp
+}
+
+// Delete used to register router for Delete method
+// usage:
+//    beego.Delete("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Delete(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Delete(rootpath, f)
+	return BeeApp
+}
+
+// Put used to register router for Put method
+// usage:
+//    beego.Put("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Put(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Put(rootpath, f)
+	return BeeApp
+}
+
+// Head used to register router for Head method
+// usage:
+//    beego.Head("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Head(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Head(rootpath, f)
+	return BeeApp
+}
+
+// Options used to register router for Options method
+// usage:
+//    beego.Options("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Options(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Options(rootpath, f)
+	return BeeApp
+}
+
+// Patch used to register router for Patch method
+// usage:
+//    beego.Patch("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Patch(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Patch(rootpath, f)
+	return BeeApp
+}
+
+// Any used to register router for all methods
+// usage:
+//    beego.Any("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Any(rootpath string, f FilterFunc) *App {
+	BeeApp.Handlers.Any(rootpath, f)
+	return BeeApp
+}
+
+// Handler used to register a Handler router
+// usage:
+//    beego.Handler("/api", func(ctx *context.Context){
+//          ctx.Output.Body("hello world")
+//    })
+func Handler(rootpath string, h http.Handler, options ...interface{}) *App {
+	BeeApp.Handlers.Handler(rootpath, h, options...)
+	return BeeApp
+}
+
+// InsertFilter adds a FilterFunc with pattern condition and action constant.
+// The pos means action constant including
+// beego.BeforeStatic, beego.BeforeRouter, beego.BeforeExec, beego.AfterExec and beego.FinishRouter.
+// The bool params is for setting the returnOnOutput value (false allows multiple filters to execute)
+func InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) *App {
+	BeeApp.Handlers.InsertFilter(pattern, pos, filter, params...)
+	return BeeApp
+}

+ 91 - 0
go/gopath/src/github.com/astaxie/beego/beego.go

@@ -0,0 +1,91 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package beego
+
+import (
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+const (
+	// VERSION represent beego web framework version.
+	VERSION = "1.6.1"
+
+	// DEV is for develop
+	DEV = "dev"
+	// PROD is for production
+	PROD = "prod"
+)
+
+//hook function to run
+type hookfunc func() error
+
+var (
+	hooks = make([]hookfunc, 0) //hook function slice to store the hookfunc
+)
+
+// AddAPPStartHook is used to register the hookfunc
+// The hookfuncs will run in beego.Run()
+// such as sessionInit, middlerware start, buildtemplate, admin start
+func AddAPPStartHook(hf hookfunc) {
+	hooks = append(hooks, hf)
+}
+
+// Run beego application.
+// beego.Run() default run on HttpPort
+// beego.Run("localhost")
+// beego.Run(":8089")
+// beego.Run("127.0.0.1:8089")
+func Run(params ...string) {
+	initBeforeHTTPRun()
+
+	if len(params) > 0 && params[0] != "" {
+		strs := strings.Split(params[0], ":")
+		if len(strs) > 0 && strs[0] != "" {
+			BConfig.Listen.HTTPAddr = strs[0]
+		}
+		if len(strs) > 1 && strs[1] != "" {
+			BConfig.Listen.HTTPPort, _ = strconv.Atoi(strs[1])
+		}
+	}
+
+	BeeApp.Run()
+}
+
+func initBeforeHTTPRun() {
+	//init hooks
+	AddAPPStartHook(registerMime)
+	AddAPPStartHook(registerDefaultErrorHandler)
+	AddAPPStartHook(registerSession)
+	AddAPPStartHook(registerDocs)
+	AddAPPStartHook(registerTemplate)
+	AddAPPStartHook(registerAdmin)
+
+	for _, hk := range hooks {
+		if err := hk(); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// TestBeegoInit is for test package init
+func TestBeegoInit(ap string) {
+	os.Setenv("BEEGO_RUNMODE", "test")
+	appConfigPath = filepath.Join(ap, "conf", "app.conf")
+	os.Chdir(ap)
+	initBeforeHTTPRun()
+}

+ 59 - 0
go/gopath/src/github.com/astaxie/beego/cache/README.md

@@ -0,0 +1,59 @@
+## cache
+cache is a Go cache manager. It can use many cache adapters. The repo is inspired by `database/sql` .
+
+
+## How to install?
+
+	go get github.com/astaxie/beego/cache
+
+
+## What adapters are supported?
+
+As of now this cache supports memory, Memcache and Redis.
+
+
+## How to use it?
+
+First you must import it
+
+	import (
+		"github.com/astaxie/beego/cache"
+	)
+
+Then init a Cache (example with memory adapter)
+
+	bm, err := cache.NewCache("memory", `{"interval":60}`)	
+
+Use it like this:	
+	
+	bm.Put("astaxie", 1, 10 * time.Second)
+	bm.Get("astaxie")
+	bm.IsExist("astaxie")
+	bm.Delete("astaxie")
+
+
+## Memory adapter
+
+Configure memory adapter like this:
+
+	{"interval":60}
+
+interval means the gc time. The cache will check at each time interval, whether item has expired.
+
+
+## Memcache adapter
+
+Memcache adapter use the [gomemcache](http://github.com/bradfitz/gomemcache) client.
+
+Configure like this:
+
+	{"conn":"127.0.0.1:11211"}
+
+
+## Redis adapter
+
+Redis adapter use the [redigo](http://github.com/garyburd/redigo) client.
+
+Configure like this:
+
+	{"conn":":6039"}

+ 103 - 0
go/gopath/src/github.com/astaxie/beego/cache/cache.go

@@ -0,0 +1,103 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cache provides a Cache interface and some implementations of it
+// Usage:
+//
+// import(
+//   "github.com/astaxie/beego/cache"
+// )
+//
+// bm, err := cache.NewCache("memory", `{"interval":60}`)
+//
+// Use it like this:
+//
+//	bm.Put("astaxie", 1, 10 * time.Second)
+//	bm.Get("astaxie")
+//	bm.IsExist("astaxie")
+//	bm.Delete("astaxie")
+//
+//  more docs http://beego.me/docs/module/cache.md
+package cache
+
+import (
+	"fmt"
+	"time"
+)
+
+// Cache interface contains all behaviors for cache adapter.
+// usage:
+//	cache.Register("file",cache.NewFileCache) // this operation is run in init method of file.go.
+//	c,err := cache.NewCache("file","{....}")
+//	c.Put("key",value, 3600 * time.Second)
+//	v := c.Get("key")
+//
+//	c.Incr("counter")  // now is 1
+//	c.Incr("counter")  // now is 2
+//	count := c.Get("counter").(int)
+type Cache interface {
+	// get cached value by key.
+	Get(key string) interface{}
+	// GetMulti is a batch version of Get.
+	GetMulti(keys []string) []interface{}
+	// set cached value with key and expire time.
+	Put(key string, val interface{}, timeout time.Duration) error
+	// delete cached value by key.
+	Delete(key string) error
+	// increase cached int value by key, as a counter.
+	Incr(key string) error
+	// decrease cached int value by key, as a counter.
+	Decr(key string) error
+	// check if cached value exists or not.
+	IsExist(key string) bool
+	// clear all cache.
+	ClearAll() error
+	// start gc routine based on config string settings.
+	StartAndGC(config string) error
+}
+
+// Instance is a function create a new Cache Instance
+type Instance func() Cache
+
+var adapters = make(map[string]Instance)
+
+// Register makes a cache adapter available by the adapter name.
+// If Register is called twice with the same name or if driver is nil,
+// it panics.
+func Register(name string, adapter Instance) {
+	if adapter == nil {
+		panic("cache: Register adapter is nil")
+	}
+	if _, ok := adapters[name]; ok {
+		panic("cache: Register called twice for adapter " + name)
+	}
+	adapters[name] = adapter
+}
+
+// NewCache Create a new cache driver by adapter name and config string.
+// config need to be correct JSON as string: {"interval":360}.
+// it will start gc automatically.
+func NewCache(adapterName, config string) (adapter Cache, err error) {
+	instanceFunc, ok := adapters[adapterName]
+	if !ok {
+		err = fmt.Errorf("cache: unknown adapter name %q (forgot to import?)", adapterName)
+		return
+	}
+	adapter = instanceFunc()
+	err = adapter.StartAndGC(config)
+	if err != nil {
+		adapter = nil
+	}
+	return
+}

+ 168 - 0
go/gopath/src/github.com/astaxie/beego/cache/cache_test.go

@@ -0,0 +1,168 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import (
+	"os"
+	"testing"
+	"time"
+)
+
+func TestCache(t *testing.T) {
+	bm, err := NewCache("memory", `{"interval":20}`)
+	if err != nil {
+		t.Error("init err")
+	}
+	timeoutDuration := 10 * time.Second
+	if err = bm.Put("astaxie", 1, timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	if v := bm.Get("astaxie"); v.(int) != 1 {
+		t.Error("get err")
+	}
+
+	time.Sleep(30 * time.Second)
+
+	if bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	if err = bm.Put("astaxie", 1, timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+
+	if err = bm.Incr("astaxie"); err != nil {
+		t.Error("Incr Error", err)
+	}
+
+	if v := bm.Get("astaxie"); v.(int) != 2 {
+		t.Error("get err")
+	}
+
+	if err = bm.Decr("astaxie"); err != nil {
+		t.Error("Decr Error", err)
+	}
+
+	if v := bm.Get("astaxie"); v.(int) != 1 {
+		t.Error("get err")
+	}
+	bm.Delete("astaxie")
+	if bm.IsExist("astaxie") {
+		t.Error("delete err")
+	}
+
+	//test GetMulti
+	if err = bm.Put("astaxie", "author", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+	if v := bm.Get("astaxie"); v.(string) != "author" {
+		t.Error("get err")
+	}
+
+	if err = bm.Put("astaxie1", "author1", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie1") {
+		t.Error("check err")
+	}
+
+	vv := bm.GetMulti([]string{"astaxie", "astaxie1"})
+	if len(vv) != 2 {
+		t.Error("GetMulti ERROR")
+	}
+	if vv[0].(string) != "author" {
+		t.Error("GetMulti ERROR")
+	}
+	if vv[1].(string) != "author1" {
+		t.Error("GetMulti ERROR")
+	}
+}
+
+func TestFileCache(t *testing.T) {
+	bm, err := NewCache("file", `{"CachePath":"cache","FileSuffix":".bin","DirectoryLevel":2,"EmbedExpiry":0}`)
+	if err != nil {
+		t.Error("init err")
+	}
+	timeoutDuration := 10 * time.Second
+	if err = bm.Put("astaxie", 1, timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	if v := bm.Get("astaxie"); v.(int) != 1 {
+		t.Error("get err")
+	}
+
+	if err = bm.Incr("astaxie"); err != nil {
+		t.Error("Incr Error", err)
+	}
+
+	if v := bm.Get("astaxie"); v.(int) != 2 {
+		t.Error("get err")
+	}
+
+	if err = bm.Decr("astaxie"); err != nil {
+		t.Error("Decr Error", err)
+	}
+
+	if v := bm.Get("astaxie"); v.(int) != 1 {
+		t.Error("get err")
+	}
+	bm.Delete("astaxie")
+	if bm.IsExist("astaxie") {
+		t.Error("delete err")
+	}
+
+	//test string
+	if err = bm.Put("astaxie", "author", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+	if v := bm.Get("astaxie"); v.(string) != "author" {
+		t.Error("get err")
+	}
+
+	//test GetMulti
+	if err = bm.Put("astaxie1", "author1", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie1") {
+		t.Error("check err")
+	}
+
+	vv := bm.GetMulti([]string{"astaxie", "astaxie1"})
+	if len(vv) != 2 {
+		t.Error("GetMulti ERROR")
+	}
+	if vv[0].(string) != "author" {
+		t.Error("GetMulti ERROR")
+	}
+	if vv[1].(string) != "author1" {
+		t.Error("GetMulti ERROR")
+	}
+
+	os.RemoveAll("cache")
+}

+ 100 - 0
go/gopath/src/github.com/astaxie/beego/cache/conv.go

@@ -0,0 +1,100 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import (
+	"fmt"
+	"strconv"
+)
+
// GetString converts an arbitrary value to its string form: strings pass
// through, []byte is converted directly, nil yields "", and every other
// value is formatted with %v.
func GetString(v interface{}) string {
	switch s := v.(type) {
	case string:
		return s
	case []byte:
		return string(s)
	}
	if v == nil {
		return ""
	}
	return fmt.Sprintf("%v", v)
}
+
+// GetInt convert interface to int.
+func GetInt(v interface{}) int {
+	switch result := v.(type) {
+	case int:
+		return result
+	case int32:
+		return int(result)
+	case int64:
+		return int(result)
+	default:
+		if d := GetString(v); d != "" {
+			value, _ := strconv.Atoi(d)
+			return value
+		}
+	}
+	return 0
+}
+
+// GetInt64 convert interface to int64.
+func GetInt64(v interface{}) int64 {
+	switch result := v.(type) {
+	case int:
+		return int64(result)
+	case int32:
+		return int64(result)
+	case int64:
+		return result
+	default:
+
+		if d := GetString(v); d != "" {
+			value, _ := strconv.ParseInt(d, 10, 64)
+			return value
+		}
+	}
+	return 0
+}
+
+// GetFloat64 convert interface to float64.
+func GetFloat64(v interface{}) float64 {
+	switch result := v.(type) {
+	case float64:
+		return result
+	default:
+		if d := GetString(v); d != "" {
+			value, _ := strconv.ParseFloat(d, 64)
+			return value
+		}
+	}
+	return 0
+}
+
+// GetBool convert interface to bool.
+func GetBool(v interface{}) bool {
+	switch result := v.(type) {
+	case bool:
+		return result
+	default:
+		if d := GetString(v); d != "" {
+			value, _ := strconv.ParseBool(d)
+			return value
+		}
+	}
+	return false
+}

+ 143 - 0
go/gopath/src/github.com/astaxie/beego/cache/conv_test.go

@@ -0,0 +1,143 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import (
+	"testing"
+)
+
+// TestGetString covers string, []byte, int, int64, float64 and nil inputs.
+func TestGetString(t *testing.T) {
+	var t1 = "test1"
+	if "test1" != GetString(t1) {
+		t.Error("get string from string error")
+	}
+	var t2 = []byte("test2")
+	if "test2" != GetString(t2) {
+		t.Error("get string from byte array error")
+	}
+	var t3 = 1
+	if "1" != GetString(t3) {
+		t.Error("get string from int error")
+	}
+	var t4 int64 = 1
+	if "1" != GetString(t4) {
+		t.Error("get string from int64 error")
+	}
+	var t5 = 1.1
+	if "1.1" != GetString(t5) {
+		t.Error("get string from float64 error")
+	}
+
+	if "" != GetString(nil) {
+		t.Error("get string from nil error")
+	}
+}
+
+// TestGetInt covers int, int32, int64, numeric-string and nil inputs.
+func TestGetInt(t *testing.T) {
+	var t1 = 1
+	if 1 != GetInt(t1) {
+		t.Error("get int from int error")
+	}
+	var t2 int32 = 32
+	if 32 != GetInt(t2) {
+		t.Error("get int from int32 error")
+	}
+	var t3 int64 = 64
+	if 64 != GetInt(t3) {
+		t.Error("get int from int64 error")
+	}
+	var t4 = "128"
+	if 128 != GetInt(t4) {
+		t.Error("get int from num string error")
+	}
+	if 0 != GetInt(nil) {
+		t.Error("get int from nil error")
+	}
+}
+
+// TestGetInt64 covers int, int32, int64, numeric-string and nil inputs.
+func TestGetInt64(t *testing.T) {
+	var i int64 = 1
+	var t1 = 1
+	if i != GetInt64(t1) {
+		t.Error("get int64 from int error")
+	}
+	var t2 int32 = 1
+	if i != GetInt64(t2) {
+		t.Error("get int64 from int32 error")
+	}
+	var t3 int64 = 1
+	if i != GetInt64(t3) {
+		t.Error("get int64 from int64 error")
+	}
+	var t4 = "1"
+	if i != GetInt64(t4) {
+		t.Error("get int64 from num string error")
+	}
+	if 0 != GetInt64(nil) {
+		t.Error("get int64 from nil")
+	}
+}
+
+// TestGetFloat64 covers float32, float64, string, int and nil inputs.
+// Note: the float32 case only compares equal to 1.11 because GetFloat64
+// routes unknown types through their "%v" string form and re-parses it; a
+// direct float64(float32(1.11)) conversion would not equal 1.11.
+func TestGetFloat64(t *testing.T) {
+	var f = 1.11
+	var t1 float32 = 1.11
+	if f != GetFloat64(t1) {
+		t.Error("get float64 from float32 error")
+	}
+	var t2 = 1.11
+	if f != GetFloat64(t2) {
+		t.Error("get float64 from float64 error")
+	}
+	var t3 = "1.11"
+	if f != GetFloat64(t3) {
+		t.Error("get float64 from string error")
+	}
+
+	var f2 float64 = 1
+	var t4 = 1
+	if f2 != GetFloat64(t4) {
+		t.Error("get float64 from int error")
+	}
+
+	if 0 != GetFloat64(nil) {
+		t.Error("get float64 from nil error")
+	}
+}
+
+// TestGetBool covers bool, boolean-string and nil inputs.
+func TestGetBool(t *testing.T) {
+	var t1 = true
+	if true != GetBool(t1) {
+		t.Error("get bool from bool error")
+	}
+	var t2 = "true"
+	if true != GetBool(t2) {
+		t.Error("get bool from string error")
+	}
+	if false != GetBool(nil) {
+		t.Error("get bool from nil error")
+	}
+}
+
// byteArrayEquals reports whether a and b contain identical bytes.
// A nil slice and an empty slice compare as equal.
func byteArrayEquals(a []byte, b []byte) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

+ 274 - 0
go/gopath/src/github.com/astaxie/beego/cache/file.go

@@ -0,0 +1,274 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/gob"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// FileCacheItem is the unit persisted by the file cache adapter: the value
+// plus its bookkeeping times, gob-encoded on disk.
+type FileCacheItem struct {
+	Data       interface{} // cached value (its concrete type is gob-registered in Put)
+	Lastaccess time.Time   // set at Put time; not updated on reads
+	Expired    time.Time   // absolute expiry deadline
+}
+
+// Package-level defaults applied by FileCache.StartAndGC when the JSON
+// config omits the corresponding key.
+var (
+	FileCachePath           = "cache"     // cache directory
+	FileCacheFileSuffix     = ".bin"      // cache file suffix
+	FileCacheDirectoryLevel = 2           // cache file deep level if auto generated cache files.
+	FileCacheEmbedExpiry    time.Duration // cache expire time, default is no expire forever.
+)
+
+// FileCache is the file-system cache adapter; fields mirror the JSON keys
+// accepted by StartAndGC.
+type FileCache struct {
+	CachePath      string // root directory for cache files
+	FileSuffix     string // extension appended to each cache file
+	DirectoryLevel int    // 1 or 2 levels of md5-prefix subdirectories
+	EmbedExpiry    int    // parsed from config; appears unused by the methods shown here — TODO confirm
+}
+
+// NewFileCache Create new file cache with no config.
+// the level and expiry need set in method StartAndGC as config string.
+func NewFileCache() Cache {
+	//    return &FileCache{CachePath:FileCachePath, FileSuffix:FileCacheFileSuffix}
+	return &FileCache{}
+}
+
+// StartAndGC will start and begin gc for file cache.
+// the config need to be like {CachePath:"/cache","FileSuffix":".bin","DirectoryLevel":2,"EmbedExpiry":0}
+func (fc *FileCache) StartAndGC(config string) error {
+
+	var cfg map[string]string
+	json.Unmarshal([]byte(config), &cfg)
+	if _, ok := cfg["CachePath"]; !ok {
+		cfg["CachePath"] = FileCachePath
+	}
+	if _, ok := cfg["FileSuffix"]; !ok {
+		cfg["FileSuffix"] = FileCacheFileSuffix
+	}
+	if _, ok := cfg["DirectoryLevel"]; !ok {
+		cfg["DirectoryLevel"] = strconv.Itoa(FileCacheDirectoryLevel)
+	}
+	if _, ok := cfg["EmbedExpiry"]; !ok {
+		cfg["EmbedExpiry"] = strconv.FormatInt(int64(FileCacheEmbedExpiry.Seconds()), 10)
+	}
+	fc.CachePath = cfg["CachePath"]
+	fc.FileSuffix = cfg["FileSuffix"]
+	fc.DirectoryLevel, _ = strconv.Atoi(cfg["DirectoryLevel"])
+	fc.EmbedExpiry, _ = strconv.Atoi(cfg["EmbedExpiry"])
+
+	fc.Init()
+	return nil
+}
+
+// Init will make new dir for file cache if not exist.
+func (fc *FileCache) Init() {
+	if ok, _ := exists(fc.CachePath); !ok { // todo : error handle
+		_ = os.MkdirAll(fc.CachePath, os.ModePerm) // todo : error handle
+	}
+}
+
+// getCacheFileName maps key to its on-disk path: the md5 hex digest of the
+// key, nested under one or two levels of 2-character subdirectories
+// depending on DirectoryLevel, with the configured suffix appended. The
+// subdirectory is created on demand.
+func (fc *FileCache) getCacheFileName(key string) string {
+	m := md5.New()
+	io.WriteString(m, key)
+	keyMd5 := hex.EncodeToString(m.Sum(nil))
+	cachePath := fc.CachePath
+	switch fc.DirectoryLevel {
+	case 2:
+		cachePath = filepath.Join(cachePath, keyMd5[0:2], keyMd5[2:4])
+	case 1:
+		cachePath = filepath.Join(cachePath, keyMd5[0:2])
+	}
+
+	if ok, _ := exists(cachePath); !ok { // todo : error handle
+		_ = os.MkdirAll(cachePath, os.ModePerm) // todo : error handle
+	}
+
+	return filepath.Join(cachePath, fmt.Sprintf("%s%s", keyMd5, fc.FileSuffix))
+}
+
+// Get value from file cache.
+// if non-exist or expired, return empty string.
+func (fc *FileCache) Get(key string) interface{} {
+	fileData, err := FileGetContents(fc.getCacheFileName(key))
+	if err != nil {
+		return ""
+	}
+	var to FileCacheItem
+	GobDecode(fileData, &to)
+	if to.Expired.Before(time.Now()) {
+		return ""
+	}
+	return to.Data
+}
+
+// GetMulti gets values from file cache.
+// if non-exist or expired, return empty string.
+func (fc *FileCache) GetMulti(keys []string) []interface{} {
+	var rc []interface{}
+	for _, key := range keys {
+		rc = append(rc, fc.Get(key))
+	}
+	return rc
+}
+
+// Put writes val into the file cache under key.
+// timeout is a time.Duration kept as-is (the old comment claimed "ms");
+// if timeout equals FileCacheEmbedExpiry (default 0), the item is kept
+// effectively forever (ten years).
+func (fc *FileCache) Put(key string, val interface{}, timeout time.Duration) error {
+	// Register val's concrete type so gob can encode it inside interface{}.
+	gob.Register(val)
+
+	item := FileCacheItem{Data: val}
+	if timeout == FileCacheEmbedExpiry {
+		item.Expired = time.Now().Add((86400 * 365 * 10) * time.Second) // ten years
+	} else {
+		item.Expired = time.Now().Add(timeout)
+	}
+	item.Lastaccess = time.Now()
+	data, err := GobEncode(item)
+	if err != nil {
+		return err
+	}
+	return FilePutContents(fc.getCacheFileName(key), data)
+}
+
+// Delete file cache value.
+func (fc *FileCache) Delete(key string) error {
+	filename := fc.getCacheFileName(key)
+	if ok, _ := exists(filename); ok {
+		return os.Remove(filename)
+	}
+	return nil
+}
+
+// Incr will increase cached int value.
+// fc value is saving forever unless Delete.
+func (fc *FileCache) Incr(key string) error {
+	data := fc.Get(key)
+	var incr int
+	if reflect.TypeOf(data).Name() != "int" {
+		incr = 0
+	} else {
+		incr = data.(int) + 1
+	}
+	fc.Put(key, incr, FileCacheEmbedExpiry)
+	return nil
+}
+
+// Decr will decrease cached int value.
+func (fc *FileCache) Decr(key string) error {
+	data := fc.Get(key)
+	var decr int
+	if reflect.TypeOf(data).Name() != "int" || data.(int)-1 <= 0 {
+		decr = 0
+	} else {
+		decr = data.(int) - 1
+	}
+	fc.Put(key, decr, FileCacheEmbedExpiry)
+	return nil
+}
+
+// IsExist check value is exist.
+func (fc *FileCache) IsExist(key string) bool {
+	ret, _ := exists(fc.getCacheFileName(key))
+	return ret
+}
+
+// ClearAll would remove every cached file.
+// NOTE(review): not implemented — it is a silent no-op that always returns
+// nil, so callers cannot tell nothing was cleared.
+func (fc *FileCache) ClearAll() error {
+	return nil
+}
+
// exists reports whether path is present on disk. A "not exist" stat
// failure maps to (false, nil); any other stat failure is returned as-is.
func exists(path string) (bool, error) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
+
// FileGetContents reads the whole file at filename.
// To mirror FilePutContents, a missing file is created empty, yielding
// zero-length data and a nil error.
func FileGetContents(filename string) ([]byte, error) {
	f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	stat, err := f.Stat()
	if err != nil {
		return nil, err
	}
	data := make([]byte, stat.Size())
	// io.ReadFull loops until the buffer is filled. The original issued a
	// single f.Read: a short read there returned nil data with a nil
	// error, silently corrupting the cache entry.
	if _, err := io.ReadFull(f, data); err != nil {
		return nil, err
	}
	return data, nil
}
+
+// FilePutContents Put bytes to file.
+// if non-exist, create this file.
+func FilePutContents(filename string, content []byte) error {
+	fp, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer fp.Close()
+	_, err = fp.Write(content)
+	return err
+}
+
// GobEncode gob-encodes data and returns the resulting bytes.
func GobEncode(data interface{}) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(data); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
+
+// GobDecode Gob decodes file cache item.
+func GobDecode(data []byte, to *FileCacheItem) error {
+	buf := bytes.NewBuffer(data)
+	dec := gob.NewDecoder(buf)
+	return dec.Decode(&to)
+}
+
+// init registers the file-system adapter under the name "file".
+func init() {
+	Register("file", NewFileCache)
+}

+ 190 - 0
go/gopath/src/github.com/astaxie/beego/cache/memcache/memcache.go

@@ -0,0 +1,190 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package memcache for cache provider
+//
+// depend on github.com/bradfitz/gomemcache/memcache
+//
+// go install github.com/bradfitz/gomemcache/memcache
+//
+// Usage:
+// import(
+//   _ "github.com/astaxie/beego/cache/memcache"
+//   "github.com/astaxie/beego/cache"
+// )
+//
+//  bm, err := cache.NewCache("memcache", `{"conn":"127.0.0.1:11211"}`)
+//
+//  more docs http://beego.me/docs/module/cache.md
+package memcache
+
+import (
+	"encoding/json"
+	"errors"
+	"strings"
+
+	"github.com/bradfitz/gomemcache/memcache"
+
+	"time"
+
+	"github.com/astaxie/beego/cache"
+)
+
+// Cache is the Memcache cache adapter.
+type Cache struct {
+	conn     *memcache.Client // lazily created client; see connectInit
+	conninfo []string         // server list parsed from config by StartAndGC
+}
+
+// NewMemCache creates a new, unconnected memcache adapter; the connection
+// is established by StartAndGC or lazily on first use.
+func NewMemCache() cache.Cache {
+	return &Cache{}
+}
+
+// Get fetches the value for key from memcache, returned as a string.
+// A missing key or failed fetch yields nil.
+// NOTE(review): when the lazy connectInit fails, the error value itself is
+// returned as the cached value — callers type-asserting to string would
+// panic. Confirm this is intended.
+func (rc *Cache) Get(key string) interface{} {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	if item, err := rc.conn.Get(key); err == nil {
+		return string(item.Value)
+	}
+	return nil
+}
+
+// GetMulti get value from memcache.
+func (rc *Cache) GetMulti(keys []string) []interface{} {
+	size := len(keys)
+	var rv []interface{}
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			for i := 0; i < size; i++ {
+				rv = append(rv, err)
+			}
+			return rv
+		}
+	}
+	mv, err := rc.conn.GetMulti(keys)
+	if err == nil {
+		for _, v := range mv {
+			rv = append(rv, string(v.Value))
+		}
+		return rv
+	}
+	for i := 0; i < size; i++ {
+		rv = append(rv, err)
+	}
+	return rv
+}
+
+// Put put value to memcache. only support string.
+func (rc *Cache) Put(key string, val interface{}, timeout time.Duration) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	v, ok := val.(string)
+	if !ok {
+		return errors.New("val must string")
+	}
+	item := memcache.Item{Key: key, Value: []byte(v), Expiration: int32(timeout / time.Second)}
+	return rc.conn.Set(&item)
+}
+
+// Delete removes key from memcache, connecting lazily if needed.
+func (rc *Cache) Delete(key string) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	return rc.conn.Delete(key)
+}
+
+// Incr adds 1 to the counter stored at key via memcache's Increment,
+// discarding the new value and returning only the error.
+func (rc *Cache) Incr(key string) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	_, err := rc.conn.Increment(key, 1)
+	return err
+}
+
+// Decr subtracts 1 from the counter stored at key via memcache's
+// Decrement, discarding the new value and returning only the error.
+func (rc *Cache) Decr(key string) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	_, err := rc.conn.Decrement(key, 1)
+	return err
+}
+
+// IsExist check value exists in memcache.
+func (rc *Cache) IsExist(key string) bool {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return false
+		}
+	}
+	_, err := rc.conn.Get(key)
+	if err != nil {
+		return false
+	}
+	return true
+}
+
+// ClearAll flushes every entry on the configured memcache servers.
+func (rc *Cache) ClearAll() error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	return rc.conn.FlushAll()
+}
+
+// StartAndGC start memcache adapter.
+// config string is like {"conn":"connection info"}.
+// if connecting error, return.
+func (rc *Cache) StartAndGC(config string) error {
+	var cf map[string]string
+	json.Unmarshal([]byte(config), &cf)
+	if _, ok := cf["conn"]; !ok {
+		return errors.New("config has no conn key")
+	}
+	rc.conninfo = strings.Split(cf["conn"], ";")
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// connectInit builds the memcache client for rc.conninfo and keeps it on
+// the adapter. It always returns nil: memcache.New reports no error here.
+func (rc *Cache) connectInit() error {
+	rc.conn = memcache.New(rc.conninfo...)
+	return nil
+}
+
+// init registers this adapter under the name "memcache".
+func init() {
+	cache.Register("memcache", NewMemCache)
+}

+ 108 - 0
go/gopath/src/github.com/astaxie/beego/cache/memcache/memcache_test.go

@@ -0,0 +1,108 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memcache
+
+import (
+	_ "github.com/bradfitz/gomemcache/memcache"
+
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/astaxie/beego/cache"
+)
+
+func TestMemcacheCache(t *testing.T) {
+	bm, err := cache.NewCache("memcache", `{"conn": "127.0.0.1:11211"}`)
+	if err != nil {
+		t.Error("init err")
+	}
+	timeoutDuration := 10 * time.Second
+	if err = bm.Put("astaxie", "1", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	time.Sleep(11 * time.Second)
+
+	if bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+	if err = bm.Put("astaxie", "1", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+
+	if v, err := strconv.Atoi(bm.Get("astaxie").(string)); err != nil || v != 1 {
+		t.Error("get err")
+	}
+
+	if err = bm.Incr("astaxie"); err != nil {
+		t.Error("Incr Error", err)
+	}
+
+	if v, err := strconv.Atoi(bm.Get("astaxie").(string)); err != nil || v != 2 {
+		t.Error("get err")
+	}
+
+	if err = bm.Decr("astaxie"); err != nil {
+		t.Error("Decr Error", err)
+	}
+
+	if v, err := strconv.Atoi(bm.Get("astaxie").(string)); err != nil || v != 1 {
+		t.Error("get err")
+	}
+	bm.Delete("astaxie")
+	if bm.IsExist("astaxie") {
+		t.Error("delete err")
+	}
+
+	//test string
+	if err = bm.Put("astaxie", "author", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	if v := bm.Get("astaxie").(string); v != "author" {
+		t.Error("get err")
+	}
+
+	//test GetMulti
+	if err = bm.Put("astaxie1", "author1", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie1") {
+		t.Error("check err")
+	}
+
+	vv := bm.GetMulti([]string{"astaxie", "astaxie1"})
+	if len(vv) != 2 {
+		t.Error("GetMulti ERROR")
+	}
+	if vv[0].(string) != "author" && vv[0].(string) != "author1" {
+		t.Error("GetMulti ERROR")
+	}
+	if vv[1].(string) != "author1" && vv[1].(string) != "author" {
+		t.Error("GetMulti ERROR")
+	}
+
+	// test clear all
+	if err = bm.ClearAll(); err != nil {
+		t.Error("clear all err")
+	}
+}

+ 244 - 0
go/gopath/src/github.com/astaxie/beego/cache/memory.go

@@ -0,0 +1,244 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import (
+	"encoding/json"
+	"errors"
+	"sync"
+	"time"
+)
+
+var (
+	// DefaultEvery is the default expiry-sweep interval, in seconds, used
+	// when the StartAndGC config supplies no "interval" key.
+	DefaultEvery = 60 // 1 minute
+)
+
+// MemoryItem stores a single in-memory cache entry.
+type MemoryItem struct {
+	val         interface{}   // cached value
+	createdTime time.Time     // when the entry was stored
+	lifespan    time.Duration // 0 means the entry never expires
+}
+
+func (mi *MemoryItem) isExpire() bool {
+	// 0 means forever
+	if mi.lifespan == 0 {
+		return false
+	}
+	return time.Now().Sub(mi.createdTime) > mi.lifespan
+}
+
+// MemoryCache is the in-memory cache adapter.
+// The embedded RWMutex guards the items map.
+type MemoryCache struct {
+	sync.RWMutex
+	dur   time.Duration // sweep interval derived from Every by StartAndGC
+	items map[string]*MemoryItem
+	Every int // run an expiration check Every clock time (seconds)
+}
+
+// NewMemoryCache returns a new MemoryCache.
+func NewMemoryCache() Cache {
+	cache := MemoryCache{items: make(map[string]*MemoryItem)}
+	return &cache
+}
+
+// Get cache from memory.
+// if non-existed or expired, return nil.
+func (bc *MemoryCache) Get(name string) interface{} {
+	bc.RLock()
+	defer bc.RUnlock()
+	if itm, ok := bc.items[name]; ok {
+		if itm.isExpire() {
+			return nil
+		}
+		return itm.val
+	}
+	return nil
+}
+
+// GetMulti gets caches from memory.
+// if non-existed or expired, return nil.
+func (bc *MemoryCache) GetMulti(names []string) []interface{} {
+	var rc []interface{}
+	for _, name := range names {
+		rc = append(rc, bc.Get(name))
+	}
+	return rc
+}
+
+// Put cache to memory.
+// if lifespan is 0, it will be forever till restart.
+func (bc *MemoryCache) Put(name string, value interface{}, lifespan time.Duration) error {
+	bc.Lock()
+	defer bc.Unlock()
+	bc.items[name] = &MemoryItem{
+		val:         value,
+		createdTime: time.Now(),
+		lifespan:    lifespan,
+	}
+	return nil
+}
+
+// Delete cache in memory.
+func (bc *MemoryCache) Delete(name string) error {
+	bc.Lock()
+	defer bc.Unlock()
+	if _, ok := bc.items[name]; !ok {
+		return errors.New("key not exist")
+	}
+	delete(bc.items, name)
+	if _, ok := bc.items[name]; ok {
+		return errors.New("delete key error")
+	}
+	return nil
+}
+
+// Incr increase cache counter in memory.
+// it supports int,int32,int64,uint,uint32,uint64.
+func (bc *MemoryCache) Incr(key string) error {
+	bc.RLock()
+	defer bc.RUnlock()
+	itm, ok := bc.items[key]
+	if !ok {
+		return errors.New("key not exist")
+	}
+	switch itm.val.(type) {
+	case int:
+		itm.val = itm.val.(int) + 1
+	case int32:
+		itm.val = itm.val.(int32) + 1
+	case int64:
+		itm.val = itm.val.(int64) + 1
+	case uint:
+		itm.val = itm.val.(uint) + 1
+	case uint32:
+		itm.val = itm.val.(uint32) + 1
+	case uint64:
+		itm.val = itm.val.(uint64) + 1
+	default:
+		return errors.New("item val is not (u)int (u)int32 (u)int64")
+	}
+	return nil
+}
+
+// Decr decrease counter in memory.
+func (bc *MemoryCache) Decr(key string) error {
+	bc.RLock()
+	defer bc.RUnlock()
+	itm, ok := bc.items[key]
+	if !ok {
+		return errors.New("key not exist")
+	}
+	switch itm.val.(type) {
+	case int:
+		itm.val = itm.val.(int) - 1
+	case int64:
+		itm.val = itm.val.(int64) - 1
+	case int32:
+		itm.val = itm.val.(int32) - 1
+	case uint:
+		if itm.val.(uint) > 0 {
+			itm.val = itm.val.(uint) - 1
+		} else {
+			return errors.New("item val is less than 0")
+		}
+	case uint32:
+		if itm.val.(uint32) > 0 {
+			itm.val = itm.val.(uint32) - 1
+		} else {
+			return errors.New("item val is less than 0")
+		}
+	case uint64:
+		if itm.val.(uint64) > 0 {
+			itm.val = itm.val.(uint64) - 1
+		} else {
+			return errors.New("item val is less than 0")
+		}
+	default:
+		return errors.New("item val is not int int64 int32")
+	}
+	return nil
+}
+
+// IsExist check cache exist in memory.
+func (bc *MemoryCache) IsExist(name string) bool {
+	bc.RLock()
+	defer bc.RUnlock()
+	if v, ok := bc.items[name]; ok {
+		return !v.isExpire()
+	}
+	return false
+}
+
+// ClearAll drops every entry by swapping in a fresh map under the write
+// lock; the old items become garbage.
+func (bc *MemoryCache) ClearAll() error {
+	bc.Lock()
+	defer bc.Unlock()
+	bc.items = make(map[string]*MemoryItem)
+	return nil
+}
+
+// StartAndGC start memory cache. it will check expiration in every clock time.
+func (bc *MemoryCache) StartAndGC(config string) error {
+	var cf map[string]int
+	json.Unmarshal([]byte(config), &cf)
+	if _, ok := cf["interval"]; !ok {
+		cf = make(map[string]int)
+		cf["interval"] = DefaultEvery
+	}
+	dur := time.Duration(cf["interval"]) * time.Second
+	bc.Every = cf["interval"]
+	bc.dur = dur
+	go bc.vaccuum()
+	return nil
+}
+
+// check expiration.
+func (bc *MemoryCache) vaccuum() {
+	if bc.Every < 1 {
+		return
+	}
+	for {
+		<-time.After(bc.dur)
+		if bc.items == nil {
+			return
+		}
+		for name := range bc.items {
+			bc.itemExpired(name)
+		}
+	}
+}
+
+// itemExpired returns true if an item is expired.
+func (bc *MemoryCache) itemExpired(name string) bool {
+	bc.Lock()
+	defer bc.Unlock()
+
+	itm, ok := bc.items[name]
+	if !ok {
+		return true
+	}
+	if itm.isExpire() {
+		delete(bc.items, name)
+		return true
+	}
+	return false
+}
+
+// init registers the in-memory adapter under the name "memory".
+func init() {
+	Register("memory", NewMemoryCache)
+}

+ 240 - 0
go/gopath/src/github.com/astaxie/beego/cache/redis/redis.go

@@ -0,0 +1,240 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package redis for cache provider
+//
+// depend on github.com/garyburd/redigo/redis
+//
+// go install github.com/garyburd/redigo/redis
+//
+// Usage:
+// import(
+//   _ "github.com/astaxie/beego/cache/redis"
+//   "github.com/astaxie/beego/cache"
+// )
+//
+//  bm, err := cache.NewCache("redis", `{"conn":"127.0.0.1:11211"}`)
+//
+//  more docs http://beego.me/docs/module/cache.md
+package redis
+
+import (
+	"encoding/json"
+	"errors"
+	"strconv"
+	"time"
+
+	"github.com/garyburd/redigo/redis"
+
+	"github.com/astaxie/beego/cache"
+)
+
+var (
+	// DefaultKey the collection name of redis for cache adapter.
+	DefaultKey = "beecacheRedis"
+)
+
// Cache is Redis cache adapter.
// It holds a redigo connection pool plus its dial settings, and records
// every cached key in a Redis hash (field `key`) so ClearAll can
// enumerate and delete them later.
type Cache struct {
	p        *redis.Pool // redis connection pool
	conninfo string      // "host:port" passed to redis.Dial
	dbNum    int         // database index SELECTed on each new connection
	key      string      // name of the bookkeeping hash; DefaultKey unless configured
	password string      // optional AUTH password; empty means no AUTH is sent
}
+
+// NewRedisCache create new redis cache with default collection name.
+func NewRedisCache() cache.Cache {
+	return &Cache{key: DefaultKey}
+}
+
+// actually do the redis cmds
+func (rc *Cache) do(commandName string, args ...interface{}) (reply interface{}, err error) {
+	c := rc.p.Get()
+	defer c.Close()
+
+	return c.Do(commandName, args...)
+}
+
+// Get cache from redis.
+func (rc *Cache) Get(key string) interface{} {
+	if v, err := rc.do("GET", key); err == nil {
+		return v
+	}
+	return nil
+}
+
+// GetMulti get cache from redis.
+func (rc *Cache) GetMulti(keys []string) []interface{} {
+	size := len(keys)
+	var rv []interface{}
+	c := rc.p.Get()
+	defer c.Close()
+	var err error
+	for _, key := range keys {
+		err = c.Send("GET", key)
+		if err != nil {
+			goto ERROR
+		}
+	}
+	if err = c.Flush(); err != nil {
+		goto ERROR
+	}
+	for i := 0; i < size; i++ {
+		if v, err := c.Receive(); err == nil {
+			rv = append(rv, v.([]byte))
+		} else {
+			rv = append(rv, err)
+		}
+	}
+	return rv
+ERROR:
+	rv = rv[0:0]
+	for i := 0; i < size; i++ {
+		rv = append(rv, nil)
+	}
+
+	return rv
+}
+
+// Put put cache to redis.
+func (rc *Cache) Put(key string, val interface{}, timeout time.Duration) error {
+	var err error
+	if _, err = rc.do("SETEX", key, int64(timeout/time.Second), val); err != nil {
+		return err
+	}
+
+	if _, err = rc.do("HSET", rc.key, key, true); err != nil {
+		return err
+	}
+	return err
+}
+
+// Delete delete cache in redis.
+func (rc *Cache) Delete(key string) error {
+	var err error
+	if _, err = rc.do("DEL", key); err != nil {
+		return err
+	}
+	_, err = rc.do("HDEL", rc.key, key)
+	return err
+}
+
+// IsExist check cache's existence in redis.
+func (rc *Cache) IsExist(key string) bool {
+	v, err := redis.Bool(rc.do("EXISTS", key))
+	if err != nil {
+		return false
+	}
+	if v == false {
+		if _, err = rc.do("HDEL", rc.key, key); err != nil {
+			return false
+		}
+	}
+	return v
+}
+
+// Incr increase counter in redis.
+func (rc *Cache) Incr(key string) error {
+	_, err := redis.Bool(rc.do("INCRBY", key, 1))
+	return err
+}
+
+// Decr decrease counter in redis.
+func (rc *Cache) Decr(key string) error {
+	_, err := redis.Bool(rc.do("INCRBY", key, -1))
+	return err
+}
+
+// ClearAll clean all cache in redis. delete this redis collection.
+func (rc *Cache) ClearAll() error {
+	cachedKeys, err := redis.Strings(rc.do("HKEYS", rc.key))
+	if err != nil {
+		return err
+	}
+	for _, str := range cachedKeys {
+		if _, err = rc.do("DEL", str); err != nil {
+			return err
+		}
+	}
+	_, err = rc.do("DEL", rc.key)
+	return err
+}
+
+// StartAndGC start redis cache adapter.
+// config is like {"key":"collection key","conn":"connection info","dbNum":"0"}
+// the cache item in redis are stored forever,
+// so no gc operation.
+func (rc *Cache) StartAndGC(config string) error {
+	var cf map[string]string
+	json.Unmarshal([]byte(config), &cf)
+
+	if _, ok := cf["key"]; !ok {
+		cf["key"] = DefaultKey
+	}
+	if _, ok := cf["conn"]; !ok {
+		return errors.New("config has no conn key")
+	}
+	if _, ok := cf["dbNum"]; !ok {
+		cf["dbNum"] = "0"
+	}
+	if _, ok := cf["password"]; !ok {
+		cf["password"] = ""
+	}
+	rc.key = cf["key"]
+	rc.conninfo = cf["conn"]
+	rc.dbNum, _ = strconv.Atoi(cf["dbNum"])
+	rc.password = cf["password"]
+
+	rc.connectInit()
+
+	c := rc.p.Get()
+	defer c.Close()
+
+	return c.Err()
+}
+
+// connect to redis.
+func (rc *Cache) connectInit() {
+	dialFunc := func() (c redis.Conn, err error) {
+		c, err = redis.Dial("tcp", rc.conninfo)
+		if err != nil {
+			return nil, err
+		}
+
+		if rc.password != "" {
+			if _, err := c.Do("AUTH", rc.password); err != nil {
+				c.Close()
+				return nil, err
+			}
+		}
+
+		_, selecterr := c.Do("SELECT", rc.dbNum)
+		if selecterr != nil {
+			c.Close()
+			return nil, selecterr
+		}
+		return
+	}
+	// initialize a new pool
+	rc.p = &redis.Pool{
+		MaxIdle:     3,
+		IdleTimeout: 180 * time.Second,
+		Dial:        dialFunc,
+	}
+}
+
// register the redis adapter so cache.NewCache("redis", ...) can build it.
func init() {
	cache.Register("redis", NewRedisCache)
}

+ 107 - 0
go/gopath/src/github.com/astaxie/beego/cache/redis/redis_test.go

@@ -0,0 +1,107 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package redis
+
+import (
+	"testing"
+	"time"
+
+	"github.com/garyburd/redigo/redis"
+
+	"github.com/astaxie/beego/cache"
+)
+
+func TestRedisCache(t *testing.T) {
+	bm, err := cache.NewCache("redis", `{"conn": "127.0.0.1:6379"}`)
+	if err != nil {
+		t.Error("init err")
+	}
+	timeoutDuration := 10 * time.Second
+	if err = bm.Put("astaxie", 1, timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	time.Sleep(11 * time.Second)
+
+	if bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+	if err = bm.Put("astaxie", 1, timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+
+	if v, _ := redis.Int(bm.Get("astaxie"), err); v != 1 {
+		t.Error("get err")
+	}
+
+	if err = bm.Incr("astaxie"); err != nil {
+		t.Error("Incr Error", err)
+	}
+
+	if v, _ := redis.Int(bm.Get("astaxie"), err); v != 2 {
+		t.Error("get err")
+	}
+
+	if err = bm.Decr("astaxie"); err != nil {
+		t.Error("Decr Error", err)
+	}
+
+	if v, _ := redis.Int(bm.Get("astaxie"), err); v != 1 {
+		t.Error("get err")
+	}
+	bm.Delete("astaxie")
+	if bm.IsExist("astaxie") {
+		t.Error("delete err")
+	}
+
+	//test string
+	if err = bm.Put("astaxie", "author", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie") {
+		t.Error("check err")
+	}
+
+	if v, _ := redis.String(bm.Get("astaxie"), err); v != "author" {
+		t.Error("get err")
+	}
+
+	//test GetMulti
+	if err = bm.Put("astaxie1", "author1", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !bm.IsExist("astaxie1") {
+		t.Error("check err")
+	}
+
+	vv := bm.GetMulti([]string{"astaxie", "astaxie1"})
+	if len(vv) != 2 {
+		t.Error("GetMulti ERROR")
+	}
+	if v, _ := redis.String(vv[0], nil); v != "author" {
+		t.Error("GetMulti ERROR")
+	}
+	if v, _ := redis.String(vv[1], nil); v != "author1" {
+		t.Error("GetMulti ERROR")
+	}
+
+	// test clear all
+	if err = bm.ClearAll(); err != nil {
+		t.Error("clear all err")
+	}
+}

+ 240 - 0
go/gopath/src/github.com/astaxie/beego/cache/ssdb/ssdb.go

@@ -0,0 +1,240 @@
+package ssdb
+
+import (
+	"encoding/json"
+	"errors"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/ssdb/gossdb/ssdb"
+
+	"github.com/astaxie/beego/cache"
+)
+
// Cache SSDB adapter.
// The gossdb client is opened lazily: every method connects on first use
// when conn is still nil.
type Cache struct {
	conn     *ssdb.Client // live connection; nil until connectInit succeeds
	conninfo []string     // "host:port" entries split from the "conn" config value
}

// NewSsdbCache creates a new, not-yet-connected ssdb adapter.
func NewSsdbCache() cache.Cache {
	return &Cache{}
}
+
+// Get get value from memcache.
+func (rc *Cache) Get(key string) interface{} {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return nil
+		}
+	}
+	value, err := rc.conn.Get(key)
+	if err == nil {
+		return value
+	}
+	return nil
+}
+
+// GetMulti get value from memcache.
+func (rc *Cache) GetMulti(keys []string) []interface{} {
+	size := len(keys)
+	var values []interface{}
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			for i := 0; i < size; i++ {
+				values = append(values, err)
+			}
+			return values
+		}
+	}
+	res, err := rc.conn.Do("multi_get", keys)
+	resSize := len(res)
+	if err == nil {
+		for i := 1; i < resSize; i += 2 {
+			values = append(values, string(res[i+1]))
+		}
+		return values
+	}
+	for i := 0; i < size; i++ {
+		values = append(values, err)
+	}
+	return values
+}
+
+// DelMulti get value from memcache.
+func (rc *Cache) DelMulti(keys []string) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	_, err := rc.conn.Do("multi_del", keys)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Put put value to memcache. only support string.
+func (rc *Cache) Put(key string, value interface{}, timeout time.Duration) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	v, ok := value.(string)
+	if !ok {
+		return errors.New("value must string")
+	}
+	var resp []string
+	var err error
+	ttl := int(timeout / time.Second)
+	if ttl < 0 {
+		resp, err = rc.conn.Do("set", key, v)
+	} else {
+		resp, err = rc.conn.Do("setx", key, v, ttl)
+	}
+	if err != nil {
+		return err
+	}
+	if len(resp) == 2 && resp[0] == "ok" {
+		return nil
+	}
+	return errors.New("bad response")
+}
+
+// Delete delete value in memcache.
+func (rc *Cache) Delete(key string) error {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	_, err := rc.conn.Del(key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
// Incr increases the counter stored at key by one.
func (rc *Cache) Incr(key string) error {
	if rc.conn == nil {
		if err := rc.connectInit(); err != nil {
			return err
		}
	}
	_, err := rc.conn.Do("incr", key, 1)
	return err
}

// Decr decreases the counter stored at key by one.
// ssdb is driven through its "incr" command with a delta of -1 here;
// there is no separate decr call in this adapter.
func (rc *Cache) Decr(key string) error {
	if rc.conn == nil {
		if err := rc.connectInit(); err != nil {
			return err
		}
	}
	_, err := rc.conn.Do("incr", key, -1)
	return err
}
+
+// IsExist check value exists in memcache.
+func (rc *Cache) IsExist(key string) bool {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return false
+		}
+	}
+	resp, err := rc.conn.Do("exists", key)
+	if err != nil {
+		return false
+	}
+	if resp[1] == "1" {
+		return true
+	}
+	return false
+
+}
+
// ClearAll removes every key by scanning the whole keyspace in pages of
// 50 and issuing a multi_del for each page, until a scan returns only its
// status line.
func (rc *Cache) ClearAll() error {
	if rc.conn == nil {
		if err := rc.connectInit(); err != nil {
			return err
		}
	}
	keyStart, keyEnd, limit := "", "", 50
	resp, err := rc.Scan(keyStart, keyEnd, limit)
	for err == nil {
		size := len(resp)
		// only the "ok" status line left: the keyspace is empty
		if size == 1 {
			return nil
		}
		// reply after the status line alternates key, value; collect keys
		keys := []string{}
		for i := 1; i < size; i += 2 {
			keys = append(keys, string(resp[i]))
		}
		_, e := rc.conn.Do("multi_del", keys)
		if e != nil {
			return e
		}
		// resume the next page just after the last key in this one.
		// NOTE(review): resp[size-2] being the last key assumes a
		// well-formed odd-length reply — confirm against gossdb's scan framing.
		keyStart = resp[size-2]
		resp, err = rc.Scan(keyStart, keyEnd, limit)
	}
	return err
}
+
+// Scan key all cached in ssdb.
+func (rc *Cache) Scan(keyStart string, keyEnd string, limit int) ([]string, error) {
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return nil, err
+		}
+	}
+	resp, err := rc.conn.Do("scan", keyStart, keyEnd, limit)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// StartAndGC start memcache adapter.
+// config string is like {"conn":"connection info"}.
+// if connecting error, return.
+func (rc *Cache) StartAndGC(config string) error {
+	var cf map[string]string
+	json.Unmarshal([]byte(config), &cf)
+	if _, ok := cf["conn"]; !ok {
+		return errors.New("config has no conn key")
+	}
+	rc.conninfo = strings.Split(cf["conn"], ";")
+	if rc.conn == nil {
+		if err := rc.connectInit(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// connect to memcache and keep the connection.
+func (rc *Cache) connectInit() error {
+	conninfoArray := strings.Split(rc.conninfo[0], ":")
+	host := conninfoArray[0]
+	port, e := strconv.Atoi(conninfoArray[1])
+	if e != nil {
+		return e
+	}
+	var err error
+	rc.conn, err = ssdb.Connect(host, port)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
// register the ssdb adapter so cache.NewCache("ssdb", ...) can build it.
func init() {
	cache.Register("ssdb", NewSsdbCache)
}

+ 103 - 0
go/gopath/src/github.com/astaxie/beego/cache/ssdb/ssdb_test.go

@@ -0,0 +1,103 @@
+package ssdb
+
+import (
+	"github.com/astaxie/beego/cache"
+	"strconv"
+	"testing"
+	"time"
+)
+
+func TestSsdbcacheCache(t *testing.T) {
+	ssdb, err := cache.NewCache("ssdb", `{"conn": "127.0.0.1:8888"}`)
+	if err != nil {
+		t.Error("init err")
+	}
+
+	// test put and exist
+	if ssdb.IsExist("ssdb") {
+		t.Error("check err")
+	}
+	timeoutDuration := 10 * time.Second
+	//timeoutDuration := -10*time.Second   if timeoutDuration is negtive,it means permanent
+	if err = ssdb.Put("ssdb", "ssdb", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if !ssdb.IsExist("ssdb") {
+		t.Error("check err")
+	}
+
+	// Get test done
+	if err = ssdb.Put("ssdb", "ssdb", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+
+	if v := ssdb.Get("ssdb"); v != "ssdb" {
+		t.Error("get Error")
+	}
+
+	//inc/dec test done
+	if err = ssdb.Put("ssdb", "2", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if err = ssdb.Incr("ssdb"); err != nil {
+		t.Error("incr Error", err)
+	}
+
+	if v, err := strconv.Atoi(ssdb.Get("ssdb").(string)); err != nil || v != 3 {
+		t.Error("get err")
+	}
+
+	if err = ssdb.Decr("ssdb"); err != nil {
+		t.Error("decr error")
+	}
+
+	// test del
+	if err = ssdb.Put("ssdb", "3", timeoutDuration); err != nil {
+		t.Error("set Error", err)
+	}
+	if v, err := strconv.Atoi(ssdb.Get("ssdb").(string)); err != nil || v != 3 {
+		t.Error("get err")
+	}
+	if err := ssdb.Delete("ssdb"); err == nil {
+		if ssdb.IsExist("ssdb") {
+			t.Error("delete err")
+		}
+	}
+
+	//test string
+	if err = ssdb.Put("ssdb", "ssdb", -10*time.Second); err != nil {
+		t.Error("set Error", err)
+	}
+	if !ssdb.IsExist("ssdb") {
+		t.Error("check err")
+	}
+	if v := ssdb.Get("ssdb").(string); v != "ssdb" {
+		t.Error("get err")
+	}
+
+	//test GetMulti done
+	if err = ssdb.Put("ssdb1", "ssdb1", -10*time.Second); err != nil {
+		t.Error("set Error", err)
+	}
+	if !ssdb.IsExist("ssdb1") {
+		t.Error("check err")
+	}
+	vv := ssdb.GetMulti([]string{"ssdb", "ssdb1"})
+	if len(vv) != 2 {
+		t.Error("getmulti error")
+	}
+	if vv[0].(string) != "ssdb" {
+		t.Error("getmulti error")
+	}
+	if vv[1].(string) != "ssdb1" {
+		t.Error("getmulti error")
+	}
+
+	// test clear all done
+	if err = ssdb.ClearAll(); err != nil {
+		t.Error("clear all err")
+	}
+	if ssdb.IsExist("ssdb") || ssdb.IsExist("ssdb1") {
+		t.Error("check err")
+	}
+}

+ 442 - 0
go/gopath/src/github.com/astaxie/beego/config.go

@@ -0,0 +1,442 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package beego
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/astaxie/beego/config"
+	"github.com/astaxie/beego/session"
+	"github.com/astaxie/beego/utils"
+)
+
// Config is the main struct for BConfig.
// It aggregates the top-level application settings plus the listener,
// web and logging sub-configurations; defaults are installed in init()
// and may be overridden from conf/app.conf by parseConfig.
type Config struct {
	AppName             string //Application name
	RunMode             string //Running Mode: dev | prod
	RouterCaseSensitive bool   // match routes case-sensitively when true
	ServerName          string // value exposed as the server name ("beegoServer:"+VERSION by default)
	RecoverPanic        bool   // recover from handler panics instead of crashing
	CopyRequestBody     bool
	EnableGzip          bool
	MaxMemory           int64 // NOTE(review): presumably the multipart-form memory cap (default 64MB) — confirm against usage
	EnableErrorsShow    bool
	Listen              Listen
	WebConfig           WebConfig
	Log                 LogConfig
}
+
// Listen holds for http and https related config:
// plain-HTTP and HTTPS endpoints, the admin endpoint, and FCGI options.
type Listen struct {
	Graceful      bool // Graceful means use graceful module to start the server
	ServerTimeOut int64
	ListenTCP4    bool
	EnableHTTP    bool
	HTTPAddr      string
	HTTPPort      int
	EnableHTTPS   bool
	HTTPSAddr     string
	HTTPSPort     int
	HTTPSCertFile string
	HTTPSKeyFile  string
	EnableAdmin   bool // serve the admin/monitoring endpoint on AdminAddr:AdminPort
	AdminAddr     string
	AdminPort     int
	EnableFcgi    bool
	EnableStdIo   bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O
}
+
// WebConfig holds web related config: template rendering, static file
// serving, flash messages, XSRF protection and the session subsection.
type WebConfig struct {
	AutoRender             bool
	EnableDocs             bool
	FlashName              string
	FlashSeparator         string
	DirectoryIndex         bool
	StaticDir              map[string]string // URL prefix -> filesystem directory
	StaticExtensionsToGzip []string          // file extensions served gzip-compressed
	TemplateLeft           string            // template action delimiters, "{{" / "}}" by default
	TemplateRight          string
	ViewsPath              string
	EnableXSRF             bool
	XSRFKey                string
	XSRFExpire             int
	Session                SessionConfig
}
+
// SessionConfig holds session related config: which provider backs the
// session store and how the session cookie behaves.
type SessionConfig struct {
	SessionOn             bool
	SessionProvider       string // e.g. "memory"; passed to the session manager
	SessionName           string // session cookie name
	SessionGCMaxLifetime  int64
	SessionProviderConfig string
	SessionCookieLifeTime int // 0 means a browser-lifetime cookie (see init defaults)
	SessionAutoSetCookie  bool
	SessionDomain         string
}
+
// LogConfig holds Log related config.
// Outputs maps a logger adaptor name to its adaptor-specific config
// string; parseConfig feeds each pair to BeeLogger.SetLogger.
type LogConfig struct {
	AccessLogs  bool
	FileLineNum bool
	Outputs     map[string]string // Store Adaptor : config
}
+
var (
	// BConfig is the default config for Application
	BConfig *Config
	// AppConfig is the instance of Config, store the config information from file
	AppConfig *beegoAppConfig
	// AppPath is the absolute path to the app
	AppPath string
	// GlobalSessions is the instance for the session manager
	GlobalSessions *session.Manager

	// appConfigPath is the path to the config files
	// (set to <AppPath>/conf/app.conf in init, replaceable via LoadAppConfig)
	appConfigPath string
	// appConfigProvider is the provider for the config, default is ini
	appConfigProvider = "ini"
)
+
// init establishes the application environment before any other beego
// code runs: it resolves AppPath from the executable's location, makes it
// the working directory, installs the compiled-in defaults into BConfig,
// and finally loads conf/app.conf when present (panicking if that file
// cannot be parsed). When no config file exists, AppConfig is backed by a
// fake, always-empty config so lookups still work.
func init() {
	AppPath, _ = filepath.Abs(filepath.Dir(os.Args[0]))

	// NOTE(review): the Chdir error is discarded — presumably acceptable at
	// startup, but worth confirming for restricted deployment directories.
	os.Chdir(AppPath)

	BConfig = &Config{
		AppName:             "beego",
		RunMode:             DEV,
		RouterCaseSensitive: true,
		ServerName:          "beegoServer:" + VERSION,
		RecoverPanic:        true,
		CopyRequestBody:     false,
		EnableGzip:          false,
		MaxMemory:           1 << 26, //64MB
		EnableErrorsShow:    true,
		Listen: Listen{
			Graceful:      false,
			ServerTimeOut: 0,
			ListenTCP4:    false,
			EnableHTTP:    true,
			HTTPAddr:      "",
			HTTPPort:      8080,
			EnableHTTPS:   false,
			HTTPSAddr:     "",
			HTTPSPort:     10443,
			HTTPSCertFile: "",
			HTTPSKeyFile:  "",
			EnableAdmin:   false,
			AdminAddr:     "",
			AdminPort:     8088,
			EnableFcgi:    false,
			EnableStdIo:   false,
		},
		WebConfig: WebConfig{
			AutoRender:             true,
			EnableDocs:             false,
			FlashName:              "BEEGO_FLASH",
			FlashSeparator:         "BEEGOFLASH",
			DirectoryIndex:         false,
			StaticDir:              map[string]string{"/static": "static"},
			StaticExtensionsToGzip: []string{".css", ".js"},
			TemplateLeft:           "{{",
			TemplateRight:          "}}",
			ViewsPath:              "views",
			EnableXSRF:             false,
			XSRFKey:                "beegoxsrf",
			XSRFExpire:             0,
			Session: SessionConfig{
				SessionOn:             false,
				SessionProvider:       "memory",
				SessionName:           "beegosessionID",
				SessionGCMaxLifetime:  3600,
				SessionProviderConfig: "",
				SessionCookieLifeTime: 0, //set cookie default is the browser life
				SessionAutoSetCookie:  true,
				SessionDomain:         "",
			},
		},
		Log: LogConfig{
			AccessLogs:  false,
			FileLineNum: true,
			Outputs:     map[string]string{"console": ""},
		},
	}

	// no config file: fall back to an empty fake config instead of failing
	appConfigPath = filepath.Join(AppPath, "conf", "app.conf")
	if !utils.FileExists(appConfigPath) {
		AppConfig = &beegoAppConfig{innerConfig: config.NewFakeConfig()}
		return
	}

	if err := parseConfig(appConfigPath); err != nil {
		panic(err)
	}
}
+
// parseConfig loads appConfigPath into AppConfig and copies every
// recognized key into the BConfig defaults (a missing key keeps the
// compiled-in default). It then parses the composite StaticDir,
// StaticExtensionsToGzip and LogOutputs values and re-initializes the
// logger. now only support ini, next will support json.
func parseConfig(appConfigPath string) (err error) {
	AppConfig, err = newAppConfig(appConfigProvider, appConfigPath)
	if err != nil {
		return err
	}
	// set the run mode first: the BEEGO_RUNMODE environment variable wins
	// over the config file, because beegoAppConfig lookups below consult
	// the RunMode-qualified section.
	if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" {
		BConfig.RunMode = envRunMode
	} else if runMode := AppConfig.String("RunMode"); runMode != "" {
		BConfig.RunMode = runMode
	}

	BConfig.AppName = AppConfig.DefaultString("AppName", BConfig.AppName)
	BConfig.RecoverPanic = AppConfig.DefaultBool("RecoverPanic", BConfig.RecoverPanic)
	BConfig.RouterCaseSensitive = AppConfig.DefaultBool("RouterCaseSensitive", BConfig.RouterCaseSensitive)
	BConfig.ServerName = AppConfig.DefaultString("ServerName", BConfig.ServerName)
	BConfig.EnableGzip = AppConfig.DefaultBool("EnableGzip", BConfig.EnableGzip)
	BConfig.EnableErrorsShow = AppConfig.DefaultBool("EnableErrorsShow", BConfig.EnableErrorsShow)
	BConfig.CopyRequestBody = AppConfig.DefaultBool("CopyRequestBody", BConfig.CopyRequestBody)
	BConfig.MaxMemory = AppConfig.DefaultInt64("MaxMemory", BConfig.MaxMemory)
	BConfig.Listen.Graceful = AppConfig.DefaultBool("Graceful", BConfig.Listen.Graceful)
	BConfig.Listen.HTTPAddr = AppConfig.String("HTTPAddr")
	BConfig.Listen.HTTPPort = AppConfig.DefaultInt("HTTPPort", BConfig.Listen.HTTPPort)
	BConfig.Listen.ListenTCP4 = AppConfig.DefaultBool("ListenTCP4", BConfig.Listen.ListenTCP4)
	BConfig.Listen.EnableHTTP = AppConfig.DefaultBool("EnableHTTP", BConfig.Listen.EnableHTTP)
	BConfig.Listen.EnableHTTPS = AppConfig.DefaultBool("EnableHTTPS", BConfig.Listen.EnableHTTPS)
	BConfig.Listen.HTTPSAddr = AppConfig.DefaultString("HTTPSAddr", BConfig.Listen.HTTPSAddr)
	BConfig.Listen.HTTPSPort = AppConfig.DefaultInt("HTTPSPort", BConfig.Listen.HTTPSPort)
	BConfig.Listen.HTTPSCertFile = AppConfig.DefaultString("HTTPSCertFile", BConfig.Listen.HTTPSCertFile)
	BConfig.Listen.HTTPSKeyFile = AppConfig.DefaultString("HTTPSKeyFile", BConfig.Listen.HTTPSKeyFile)
	BConfig.Listen.EnableAdmin = AppConfig.DefaultBool("EnableAdmin", BConfig.Listen.EnableAdmin)
	BConfig.Listen.AdminAddr = AppConfig.DefaultString("AdminAddr", BConfig.Listen.AdminAddr)
	BConfig.Listen.AdminPort = AppConfig.DefaultInt("AdminPort", BConfig.Listen.AdminPort)
	BConfig.Listen.EnableFcgi = AppConfig.DefaultBool("EnableFcgi", BConfig.Listen.EnableFcgi)
	BConfig.Listen.EnableStdIo = AppConfig.DefaultBool("EnableStdIo", BConfig.Listen.EnableStdIo)
	BConfig.Listen.ServerTimeOut = AppConfig.DefaultInt64("ServerTimeOut", BConfig.Listen.ServerTimeOut)
	BConfig.WebConfig.AutoRender = AppConfig.DefaultBool("AutoRender", BConfig.WebConfig.AutoRender)
	BConfig.WebConfig.ViewsPath = AppConfig.DefaultString("ViewsPath", BConfig.WebConfig.ViewsPath)
	BConfig.WebConfig.DirectoryIndex = AppConfig.DefaultBool("DirectoryIndex", BConfig.WebConfig.DirectoryIndex)
	BConfig.WebConfig.FlashName = AppConfig.DefaultString("FlashName", BConfig.WebConfig.FlashName)
	BConfig.WebConfig.FlashSeparator = AppConfig.DefaultString("FlashSeparator", BConfig.WebConfig.FlashSeparator)
	BConfig.WebConfig.EnableDocs = AppConfig.DefaultBool("EnableDocs", BConfig.WebConfig.EnableDocs)
	BConfig.WebConfig.XSRFKey = AppConfig.DefaultString("XSRFKEY", BConfig.WebConfig.XSRFKey)
	BConfig.WebConfig.EnableXSRF = AppConfig.DefaultBool("EnableXSRF", BConfig.WebConfig.EnableXSRF)
	BConfig.WebConfig.XSRFExpire = AppConfig.DefaultInt("XSRFExpire", BConfig.WebConfig.XSRFExpire)
	BConfig.WebConfig.TemplateLeft = AppConfig.DefaultString("TemplateLeft", BConfig.WebConfig.TemplateLeft)
	BConfig.WebConfig.TemplateRight = AppConfig.DefaultString("TemplateRight", BConfig.WebConfig.TemplateRight)
	BConfig.WebConfig.Session.SessionOn = AppConfig.DefaultBool("SessionOn", BConfig.WebConfig.Session.SessionOn)
	BConfig.WebConfig.Session.SessionProvider = AppConfig.DefaultString("SessionProvider", BConfig.WebConfig.Session.SessionProvider)
	BConfig.WebConfig.Session.SessionName = AppConfig.DefaultString("SessionName", BConfig.WebConfig.Session.SessionName)
	BConfig.WebConfig.Session.SessionProviderConfig = AppConfig.DefaultString("SessionProviderConfig", BConfig.WebConfig.Session.SessionProviderConfig)
	BConfig.WebConfig.Session.SessionGCMaxLifetime = AppConfig.DefaultInt64("SessionGCMaxLifetime", BConfig.WebConfig.Session.SessionGCMaxLifetime)
	BConfig.WebConfig.Session.SessionCookieLifeTime = AppConfig.DefaultInt("SessionCookieLifeTime", BConfig.WebConfig.Session.SessionCookieLifeTime)
	BConfig.WebConfig.Session.SessionAutoSetCookie = AppConfig.DefaultBool("SessionAutoSetCookie", BConfig.WebConfig.Session.SessionAutoSetCookie)
	BConfig.WebConfig.Session.SessionDomain = AppConfig.DefaultString("SessionDomain", BConfig.WebConfig.Session.SessionDomain)
	BConfig.Log.AccessLogs = AppConfig.DefaultBool("LogAccessLogs", BConfig.Log.AccessLogs)
	BConfig.Log.FileLineNum = AppConfig.DefaultBool("LogFileLineNum", BConfig.Log.FileLineNum)

	// StaticDir is whitespace-separated "urlprefix:dir" pairs; a bare value
	// maps the prefix onto a directory of the same name. Configuring it
	// replaces the default map entirely.
	if sd := AppConfig.String("StaticDir"); sd != "" {
		for k := range BConfig.WebConfig.StaticDir {
			delete(BConfig.WebConfig.StaticDir, k)
		}
		sds := strings.Fields(sd)
		for _, v := range sds {
			if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 {
				BConfig.WebConfig.StaticDir["/"+strings.TrimRight(url2fsmap[0], "/")] = url2fsmap[1]
			} else {
				BConfig.WebConfig.StaticDir["/"+strings.TrimRight(url2fsmap[0], "/")] = url2fsmap[0]
			}
		}
	}

	// StaticExtensionsToGzip is a comma-separated extension list; a missing
	// leading dot is added for each entry.
	if sgz := AppConfig.String("StaticExtensionsToGzip"); sgz != "" {
		extensions := strings.Split(sgz, ",")
		fileExts := []string{}
		for _, ext := range extensions {
			ext = strings.TrimSpace(ext)
			if ext == "" {
				continue
			}
			if !strings.HasPrefix(ext, ".") {
				ext = "." + ext
			}
			fileExts = append(fileExts, ext)
		}
		if len(fileExts) > 0 {
			BConfig.WebConfig.StaticExtensionsToGzip = fileExts
		}
	}

	// LogOutputs is semicolon-separated "adaptor,config" pairs; malformed
	// entries are skipped.
	if lo := AppConfig.String("LogOutputs"); lo != "" {
		los := strings.Split(lo, ";")
		for _, v := range los {
			if logType2Config := strings.SplitN(v, ",", 2); len(logType2Config) == 2 {
				BConfig.Log.Outputs[logType2Config[0]] = logType2Config[1]
			} else {
				continue
			}
		}
	}

	//init log
	// NOTE(review): the loop variable `config` shadows the imported config
	// package inside this loop; harmless here but worth renaming.
	BeeLogger.Reset()
	for adaptor, config := range BConfig.Log.Outputs {
		err = BeeLogger.SetLogger(adaptor, config)
		if err != nil {
			fmt.Printf("%s with the config `%s` got err:%s\n", adaptor, config, err)
		}
	}
	SetLogFuncCall(BConfig.Log.FileLineNum)

	return nil
}
+
+// LoadAppConfig allow developer to apply a config file
+func LoadAppConfig(adapterName, configPath string) error {
+	absConfigPath, err := filepath.Abs(configPath)
+	if err != nil {
+		return err
+	}
+
+	if !utils.FileExists(absConfigPath) {
+		return fmt.Errorf("the target config file: %s don't exist", configPath)
+	}
+
+	if absConfigPath == appConfigPath {
+		return nil
+	}
+
+	appConfigPath = absConfigPath
+	appConfigProvider = adapterName
+
+	return parseConfig(appConfigPath)
+}
+
// beegoAppConfig wraps a config.Configer and prefixes each lookup with
// the current run mode ("<RunMode>::<key>"), falling back to the plain
// key when the mode-qualified entry is absent.
type beegoAppConfig struct {
	innerConfig config.Configer
}
+
+func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, error) {
+	ac, err := config.NewConfig(appConfigProvider, appConfigPath)
+	if err != nil {
+		return nil, err
+	}
+	return &beegoAppConfig{ac}, nil
+}
+
+func (b *beegoAppConfig) Set(key, val string) error {
+	if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil {
+		return err
+	}
+	return b.innerConfig.Set(key, val)
+}
+
+func (b *beegoAppConfig) String(key string) string {
+	if v := b.innerConfig.String(BConfig.RunMode + "::" + key); v != "" {
+		return v
+	}
+	return b.innerConfig.String(key)
+}
+
+func (b *beegoAppConfig) Strings(key string) []string {
+	if v := b.innerConfig.Strings(BConfig.RunMode + "::" + key); v[0] != "" {
+		return v
+	}
+	return b.innerConfig.Strings(key)
+}
+
+func (b *beegoAppConfig) Int(key string) (int, error) {
+	if v, err := b.innerConfig.Int(BConfig.RunMode + "::" + key); err == nil {
+		return v, nil
+	}
+	return b.innerConfig.Int(key)
+}
+
+func (b *beegoAppConfig) Int64(key string) (int64, error) {
+	if v, err := b.innerConfig.Int64(BConfig.RunMode + "::" + key); err == nil {
+		return v, nil
+	}
+	return b.innerConfig.Int64(key)
+}
+
+func (b *beegoAppConfig) Bool(key string) (bool, error) {
+	if v, err := b.innerConfig.Bool(BConfig.RunMode + "::" + key); err == nil {
+		return v, nil
+	}
+	return b.innerConfig.Bool(key)
+}
+
+func (b *beegoAppConfig) Float(key string) (float64, error) {
+	if v, err := b.innerConfig.Float(BConfig.RunMode + "::" + key); err == nil {
+		return v, nil
+	}
+	return b.innerConfig.Float(key)
+}
+
+func (b *beegoAppConfig) DefaultString(key string, defaultVal string) string {
+	if v := b.String(key); v != "" {
+		return v
+	}
+	return defaultVal
+}
+
+func (b *beegoAppConfig) DefaultStrings(key string, defaultVal []string) []string {
+	if v := b.Strings(key); len(v) != 0 {
+		return v
+	}
+	return defaultVal
+}
+
+func (b *beegoAppConfig) DefaultInt(key string, defaultVal int) int {
+	if v, err := b.Int(key); err == nil {
+		return v
+	}
+	return defaultVal
+}
+
+func (b *beegoAppConfig) DefaultInt64(key string, defaultVal int64) int64 {
+	if v, err := b.Int64(key); err == nil {
+		return v
+	}
+	return defaultVal
+}
+
+func (b *beegoAppConfig) DefaultBool(key string, defaultVal bool) bool {
+	if v, err := b.Bool(key); err == nil {
+		return v
+	}
+	return defaultVal
+}
+
+func (b *beegoAppConfig) DefaultFloat(key string, defaultVal float64) float64 {
+	if v, err := b.Float(key); err == nil {
+		return v
+	}
+	return defaultVal
+}
+
// DIY returns the raw value stored under key, delegating straight to the
// inner config (no run-mode prefixing).
func (b *beegoAppConfig) DIY(key string) (interface{}, error) {
	return b.innerConfig.DIY(key)
}

// GetSection returns all key/value pairs of the named section from the
// inner config.
func (b *beegoAppConfig) GetSection(section string) (map[string]string, error) {
	return b.innerConfig.GetSection(section)
}

// SaveConfigFile writes the inner config back out to filename.
func (b *beegoAppConfig) SaveConfigFile(filename string) error {
	return b.innerConfig.SaveConfigFile(filename)
}

+ 144 - 0
go/gopath/src/github.com/astaxie/beego/config/config.go

@@ -0,0 +1,144 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package config is used to parse config
+// Usage:
+// import(
+//   "github.com/astaxie/beego/config"
+// )
+//
+//  cnf, err := config.NewConfig("ini", "config.conf")
+//
+//  cnf APIS:
+//
+//  cnf.Set(key, val string) error
+//  cnf.String(key string) string
+//  cnf.Strings(key string) []string
+//  cnf.Int(key string) (int, error)
+//  cnf.Int64(key string) (int64, error)
+//  cnf.Bool(key string) (bool, error)
+//  cnf.Float(key string) (float64, error)
+//  cnf.DefaultString(key string, defaultVal string) string
+//  cnf.DefaultStrings(key string, defaultVal []string) []string
+//  cnf.DefaultInt(key string, defaultVal int) int
+//  cnf.DefaultInt64(key string, defaultVal int64) int64
+//  cnf.DefaultBool(key string, defaultVal bool) bool
+//  cnf.DefaultFloat(key string, defaultVal float64) float64
+//  cnf.DIY(key string) (interface{}, error)
+//  cnf.GetSection(section string) (map[string]string, error)
+//  cnf.SaveConfigFile(filename string) error
+//
+//  more docs http://beego.me/docs/module/config.md
+package config
+
+import (
+	"fmt"
+)
+
// Configer defines how to get and set values from configuration raw data.
type Configer interface {
	Set(key, val string) error   // supports "section::key" keys with the ini adapter
	String(key string) string    // supports "section::key" keys with ini and json adapters; Int, Int64, Bool, Float, DIY behave the same
	Strings(key string) []string // get string slice
	Int(key string) (int, error)
	Int64(key string) (int64, error)
	Bool(key string) (bool, error)
	Float(key string) (float64, error)
	DefaultString(key string, defaultVal string) string      // returns defaultVal when the key is unset or empty
	DefaultStrings(key string, defaultVal []string) []string // returns defaultVal when the key yields no values
	DefaultInt(key string, defaultVal int) int
	DefaultInt64(key string, defaultVal int64) int64
	DefaultBool(key string, defaultVal bool) bool
	DefaultFloat(key string, defaultVal float64) float64
	DIY(key string) (interface{}, error)
	GetSection(section string) (map[string]string, error)
	SaveConfigFile(filename string) error
}

// Config is the adapter interface for parsing config file to get raw data to Configer.
type Config interface {
	Parse(key string) (Configer, error)      // parse a configuration file by path
	ParseData(data []byte) (Configer, error) // parse in-memory configuration data
}
+
+var adapters = make(map[string]Config)
+
+// Register makes a config adapter available by the adapter name.
+// If Register is called twice with the same name or if driver is nil,
+// it panics.
+func Register(name string, adapter Config) {
+	if adapter == nil {
+		panic("config: Register adapter is nil")
+	}
+	if _, ok := adapters[name]; ok {
+		panic("config: Register called twice for adapter " + name)
+	}
+	adapters[name] = adapter
+}
+
+// NewConfig adapterName is ini/json/xml/yaml.
+// filename is the config file path.
+func NewConfig(adapterName, filename string) (Configer, error) {
+	adapter, ok := adapters[adapterName]
+	if !ok {
+		return nil, fmt.Errorf("config: unknown adaptername %q (forgotten import?)", adapterName)
+	}
+	return adapter.Parse(filename)
+}
+
+// NewConfigData adapterName is ini/json/xml/yaml.
+// data is the config data.
+func NewConfigData(adapterName string, data []byte) (Configer, error) {
+	adapter, ok := adapters[adapterName]
+	if !ok {
+		return nil, fmt.Errorf("config: unknown adaptername %q (forgotten import?)", adapterName)
+	}
+	return adapter.ParseData(data)
+}
+
// ParseBool returns the boolean value represented by val.
//
// It accepts 1, 1.0, t, T, TRUE, true, True, YES, yes, Yes, Y, y, ON, on, On,
// 0, 0.0, f, F, FALSE, false, False, NO, no, No, N, n, OFF, off, Off.
// Any other value returns an error.
func ParseBool(val interface{}) (value bool, err error) {
	if val != nil {
		switch v := val.(type) {
		case bool:
			return v, nil
		case string:
			switch v {
			case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "Y", "y", "ON", "on", "On":
				return true, nil
			case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "N", "n", "OFF", "off", "Off":
				return false, nil
			}
		case int, int8, int16, int32, int64:
			// BUG FIX: the original used fmt.Sprintf("%s", v), which renders
			// an integer as "%!s(int64=1)" and therefore never matched "1" or
			// "0". %v formats the underlying integer correctly. Also
			// generalized to cover int and int16 (previously an error).
			strV := fmt.Sprintf("%v", v)
			if strV == "1" {
				return true, nil
			} else if strV == "0" {
				return false, nil
			}
		case float64:
			if v == 1 {
				return true, nil
			} else if v == 0 {
				return false, nil
			}
		}
		return false, fmt.Errorf("parsing %q: invalid syntax", val)
	}
	return false, fmt.Errorf("parsing <nil>: invalid syntax")
}

+ 134 - 0
go/gopath/src/github.com/astaxie/beego/config/fake.go

@@ -0,0 +1,134 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"errors"
+	"strconv"
+	"strings"
+)
+
// fakeConfigContainer is an in-memory Configer backed by a flat map,
// intended for tests and defaults. Keys are stored lower-cased, so lookups
// are case-insensitive. It has no locking, so it is not safe for
// concurrent use.
type fakeConfigContainer struct {
	data map[string]string
}

// getData returns the raw value for key (case-insensitive), or "" when absent.
func (c *fakeConfigContainer) getData(key string) string {
	return c.data[strings.ToLower(key)]
}

// Set stores val under the lower-cased key. It never fails.
func (c *fakeConfigContainer) Set(key, val string) error {
	c.data[strings.ToLower(key)] = val
	return nil
}
+
+func (c *fakeConfigContainer) String(key string) string {
+	return c.getData(key)
+}
+
+func (c *fakeConfigContainer) DefaultString(key string, defaultval string) string {
+	v := c.getData(key)
+	if v == "" {
+		return defaultval
+	}
+	return v
+}
+
+func (c *fakeConfigContainer) Strings(key string) []string {
+	v := c.getData(key)
+	if v == "" {
+		return nil
+	}
+	return strings.Split(v, ";")
+}
+
+func (c *fakeConfigContainer) DefaultStrings(key string, defaultval []string) []string {
+	v := c.Strings(key)
+	if v == nil {
+		return defaultval
+	}
+	return v
+}
+
+func (c *fakeConfigContainer) Int(key string) (int, error) {
+	return strconv.Atoi(c.getData(key))
+}
+
+func (c *fakeConfigContainer) DefaultInt(key string, defaultval int) int {
+	v, err := c.Int(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+func (c *fakeConfigContainer) Int64(key string) (int64, error) {
+	return strconv.ParseInt(c.getData(key), 10, 64)
+}
+
+func (c *fakeConfigContainer) DefaultInt64(key string, defaultval int64) int64 {
+	v, err := c.Int64(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+func (c *fakeConfigContainer) Bool(key string) (bool, error) {
+	return ParseBool(c.getData(key))
+}
+
+func (c *fakeConfigContainer) DefaultBool(key string, defaultval bool) bool {
+	v, err := c.Bool(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+func (c *fakeConfigContainer) Float(key string) (float64, error) {
+	return strconv.ParseFloat(c.getData(key), 64)
+}
+
+func (c *fakeConfigContainer) DefaultFloat(key string, defaultval float64) float64 {
+	v, err := c.Float(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
// DIY returns the raw stored value for key (case-insensitive), or an error
// when the key is absent.
func (c *fakeConfigContainer) DIY(key string) (interface{}, error) {
	if v, ok := c.data[strings.ToLower(key)]; ok {
		return v, nil
	}
	return nil, errors.New("key not find")
}

// GetSection is not supported by the fake container and always errors.
func (c *fakeConfigContainer) GetSection(section string) (map[string]string, error) {
	return nil, errors.New("not implement in the fakeConfigContainer")
}

// SaveConfigFile is not supported by the fake container and always errors.
func (c *fakeConfigContainer) SaveConfigFile(filename string) error {
	return errors.New("not implement in the fakeConfigContainer")
}

// Compile-time check that fakeConfigContainer satisfies Configer.
var _ Configer = new(fakeConfigContainer)

// NewFakeConfig returns a fake Configer backed by an empty in-memory map.
func NewFakeConfig() Configer {
	return &fakeConfigContainer{
		data: make(map[string]string),
	}
}

+ 464 - 0
go/gopath/src/github.com/astaxie/beego/config/ini.go

@@ -0,0 +1,464 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
// Byte tokens recognized by the ini parser and emitted by SaveConfigFile.
var (
	defaultSection = "default"   // items outside any [section] land in this pseudo-section
	bNumComment    = []byte{'#'} // '#' starts a comment line
	bSemComment    = []byte{';'} // ';' also starts a comment line
	bEmpty         = []byte{}
	bEqual         = []byte{'='} // key/value separator
	bDQuote        = []byte{'"'} // values may be double-quoted
	sectionStart   = []byte{'['} // section header open
	sectionEnd     = []byte{']'} // section header close
	lineBreak      = "\n"
)
+
// IniConfig implements the Config adapter interface for ini files.
type IniConfig struct {
}

// Parse creates a new Configer by parsing the configuration from the named file.
func (ini *IniConfig) Parse(name string) (Configer, error) {
	return ini.parseFile(name)
}
+
// parseFile reads and parses the ini file at name into a fresh
// IniConfigContainer. It skips a UTF-8 BOM, collects '#'/';' comment lines
// and attaches them to the following section or key, lower-cases section
// and key names, and recursively merges files pulled in via
// `include "other.conf"` (relative paths resolve against name's directory).
func (ini *IniConfig) parseFile(name string) (*IniConfigContainer, error) {
	file, err := os.Open(name)
	if err != nil {
		return nil, err
	}

	cfg := &IniConfigContainer{
		file.Name(),
		make(map[string]map[string]string),
		make(map[string]string),
		make(map[string]string),
		sync.RWMutex{},
	}
	cfg.Lock()
	defer cfg.Unlock()
	defer file.Close()

	var comment bytes.Buffer
	buf := bufio.NewReader(file)
	// check the BOM (bytes EF BB BF) and consume it if present
	head, err := buf.Peek(3)
	if err == nil && head[0] == 239 && head[1] == 187 && head[2] == 191 {
		for i := 1; i <= 3; i++ {
			buf.ReadByte()
		}
	}
	section := defaultSection
	for {
		line, _, err := buf.ReadLine()
		// NOTE(review): only io.EOF ends the loop — other read errors are
		// ignored, and ReadLine's isPrefix flag is discarded, so an
		// over-long line would be silently truncated. Confirm acceptable.
		if err == io.EOF {
			break
		}
		if bytes.Equal(line, bEmpty) {
			continue
		}
		line = bytes.TrimSpace(line)

		// Detect which comment marker (if any) starts this line.
		var bComment []byte
		switch {
		case bytes.HasPrefix(line, bNumComment):
			bComment = bNumComment
		case bytes.HasPrefix(line, bSemComment):
			bComment = bSemComment
		}
		if bComment != nil {
			line = bytes.TrimLeft(line, string(bComment))
			// Need append to a new line if multi-line comments.
			if comment.Len() > 0 {
				comment.WriteByte('\n')
			}
			comment.Write(line)
			continue
		}

		// Section header: "[name]". Pending comment lines attach to it.
		if bytes.HasPrefix(line, sectionStart) && bytes.HasSuffix(line, sectionEnd) {
			section = strings.ToLower(string(line[1 : len(line)-1])) // section name case insensitive
			if comment.Len() > 0 {
				cfg.sectionComment[section] = comment.String()
				comment.Reset()
			}
			if _, ok := cfg.data[section]; !ok {
				cfg.data[section] = make(map[string]string)
			}
			continue
		}

		if _, ok := cfg.data[section]; !ok {
			cfg.data[section] = make(map[string]string)
		}
		keyValue := bytes.SplitN(line, bEqual, 2)

		key := string(bytes.TrimSpace(keyValue[0])) // key name case insensitive
		key = strings.ToLower(key)

		// handle include "other.conf": parse the referenced file and merge
		// its sections, values, and comments into this container
		if len(keyValue) == 1 && strings.HasPrefix(key, "include") {
			includefiles := strings.Fields(key)
			if includefiles[0] == "include" && len(includefiles) == 2 {
				otherfile := strings.Trim(includefiles[1], "\"")
				if !path.IsAbs(otherfile) {
					otherfile = path.Join(path.Dir(name), otherfile)
				}
				i, err := ini.parseFile(otherfile)
				if err != nil {
					return nil, err
				}
				for sec, dt := range i.data {
					if _, ok := cfg.data[sec]; !ok {
						cfg.data[sec] = make(map[string]string)
					}
					for k, v := range dt {
						cfg.data[sec][k] = v
					}
				}
				for sec, comm := range i.sectionComment {
					cfg.sectionComment[sec] = comm
				}
				for k, comm := range i.keyComment {
					cfg.keyComment[k] = comm
				}
				continue
			}
		}

		if len(keyValue) != 2 {
			return nil, errors.New("read the content error: \"" + string(line) + "\", should key = val")
		}
		// Trim surrounding whitespace and an optional leading double quote.
		val := bytes.TrimSpace(keyValue[1])
		if bytes.HasPrefix(val, bDQuote) {
			val = bytes.Trim(val, `"`)
		}

		cfg.data[section][key] = string(val)
		// Attach any pending comment lines to this key.
		if comment.Len() > 0 {
			cfg.keyComment[section+"."+key] = comment.String()
			comment.Reset()
		}

	}
	return cfg, nil
}
+
+// ParseData parse ini the data
+func (ini *IniConfig) ParseData(data []byte) (Configer, error) {
+	// Save memory data to temporary file
+	tmpName := path.Join(os.TempDir(), "beego", fmt.Sprintf("%d", time.Now().Nanosecond()))
+	os.MkdirAll(path.Dir(tmpName), os.ModePerm)
+	if err := ioutil.WriteFile(tmpName, data, 0655); err != nil {
+		return nil, err
+	}
+	return ini.Parse(tmpName)
+}
+
// IniConfigContainer represents a parsed ini configuration.
// When setting and getting values, keys may be addressed as
// "section::key"; bare keys use the default section.
type IniConfigContainer struct {
	filename       string
	data           map[string]map[string]string // section => key:val
	sectionComment map[string]string            // section name => comment block above the header
	keyComment     map[string]string            // "section.key" => comment block above the key
	sync.RWMutex                                // guards the maps; taken by Set and getdata
}
+
+// Bool returns the boolean value for a given key.
+func (c *IniConfigContainer) Bool(key string) (bool, error) {
+	return ParseBool(c.getdata(key))
+}
+
+// DefaultBool returns the boolean value for a given key.
+// if err != nil return defaltval
+func (c *IniConfigContainer) DefaultBool(key string, defaultval bool) bool {
+	v, err := c.Bool(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+// Int returns the integer value for a given key.
+func (c *IniConfigContainer) Int(key string) (int, error) {
+	return strconv.Atoi(c.getdata(key))
+}
+
+// DefaultInt returns the integer value for a given key.
+// if err != nil return defaltval
+func (c *IniConfigContainer) DefaultInt(key string, defaultval int) int {
+	v, err := c.Int(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+// Int64 returns the int64 value for a given key.
+func (c *IniConfigContainer) Int64(key string) (int64, error) {
+	return strconv.ParseInt(c.getdata(key), 10, 64)
+}
+
+// DefaultInt64 returns the int64 value for a given key.
+// if err != nil return defaltval
+func (c *IniConfigContainer) DefaultInt64(key string, defaultval int64) int64 {
+	v, err := c.Int64(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+// Float returns the float value for a given key.
+func (c *IniConfigContainer) Float(key string) (float64, error) {
+	return strconv.ParseFloat(c.getdata(key), 64)
+}
+
+// DefaultFloat returns the float64 value for a given key.
+// if err != nil return defaltval
+func (c *IniConfigContainer) DefaultFloat(key string, defaultval float64) float64 {
+	v, err := c.Float(key)
+	if err != nil {
+		return defaultval
+	}
+	return v
+}
+
+// String returns the string value for a given key.
+func (c *IniConfigContainer) String(key string) string {
+	return c.getdata(key)
+}
+
+// DefaultString returns the string value for a given key.
+// if err != nil return defaltval
+func (c *IniConfigContainer) DefaultString(key string, defaultval string) string {
+	v := c.String(key)
+	if v == "" {
+		return defaultval
+	}
+	return v
+}
+
+// Strings returns the []string value for a given key.
+// Return nil if config value does not exist or is empty.
+func (c *IniConfigContainer) Strings(key string) []string {
+	v := c.String(key)
+	if v == "" {
+		return nil
+	}
+	return strings.Split(v, ";")
+}
+
+// DefaultStrings returns the []string value for a given key.
+// if err != nil return defaltval
+func (c *IniConfigContainer) DefaultStrings(key string, defaultval []string) []string {
+	v := c.Strings(key)
+	if v == nil {
+		return defaultval
+	}
+	return v
+}
+
+// GetSection returns map for the given section
+func (c *IniConfigContainer) GetSection(section string) (map[string]string, error) {
+	if v, ok := c.data[section]; ok {
+		return v, nil
+	}
+	return nil, errors.New("not exist setction")
+}
+
// SaveConfigFile writes the configuration back to filename in ini format,
// re-emitting stored section and key comments. The default section is
// written first, without a [section] header.
// NOTE(review): the maps are read without RLock here, so saving while
// another goroutine calls Set is a data race — confirm callers serialize.
func (c *IniConfigContainer) SaveConfigFile(filename string) (err error) {
	// Write configuration file by filename.
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	// Get section or key comments. Fixed #1607
	getCommentStr := func(section, key string) string {
		comment, ok := "", false
		if len(key) == 0 {
			comment, ok = c.sectionComment[section]
		} else {
			comment, ok = c.keyComment[section+"."+key]
		}

		if ok {
			// Empty comment renders as a bare "#" line.
			if len(comment) == 0 || len(strings.TrimSpace(comment)) == 0 {
				return string(bNumComment)
			}
			prefix := string(bNumComment)
			// Add the line head character "#" to every stored comment line.
			return prefix + strings.Replace(comment, lineBreak, lineBreak+prefix, -1)
		}
		return ""
	}

	buf := bytes.NewBuffer(nil)
	// Save default section at first place
	if dt, ok := c.data[defaultSection]; ok {
		for key, val := range dt {
			// NOTE(review): a single-space key is skipped — presumably a
			// sentinel from parsing; TODO confirm why.
			if key != " " {
				// Write key comments.
				if v := getCommentStr(defaultSection, key); len(v) > 0 {
					if _, err = buf.WriteString(v + lineBreak); err != nil {
						return err
					}
				}

				// Write key and value.
				if _, err = buf.WriteString(key + string(bEqual) + val + lineBreak); err != nil {
					return err
				}
			}
		}

		// Put a line between sections.
		if _, err = buf.WriteString(lineBreak); err != nil {
			return err
		}
	}
	// Save named sections
	for section, dt := range c.data {
		if section != defaultSection {
			// Write section comments.
			if v := getCommentStr(section, ""); len(v) > 0 {
				if _, err = buf.WriteString(v + lineBreak); err != nil {
					return err
				}
			}

			// Write section name.
			if _, err = buf.WriteString(string(sectionStart) + section + string(sectionEnd) + lineBreak); err != nil {
				return err
			}

			for key, val := range dt {
				if key != " " {
					// Write key comments.
					if v := getCommentStr(section, key); len(v) > 0 {
						if _, err = buf.WriteString(v + lineBreak); err != nil {
							return err
						}
					}

					// Write key and value.
					if _, err = buf.WriteString(key + string(bEqual) + val + lineBreak); err != nil {
						return err
					}
				}
			}

			// Put a line between sections.
			if _, err = buf.WriteString(lineBreak); err != nil {
				return err
			}
		}
	}

	// Flush the assembled document to the file in one write.
	if _, err = buf.WriteTo(f); err != nil {
		return err
	}
	return nil
}
+
// Set writes a new value for key. Use "section::key" to target a section;
// a bare key goes to the default section. The section map is created on
// demand — contrary to the original comment, no panic occurs for an
// unknown section.
// NOTE(review): unlike getdata, Set does not lower-case section/key, so a
// mixed-case key written here is not found by the case-lowering readers —
// confirm whether callers always pass lower-case keys.
func (c *IniConfigContainer) Set(key, value string) error {
	c.Lock()
	defer c.Unlock()
	if len(key) == 0 {
		return errors.New("key is empty")
	}

	var (
		section, k string
		sectionKey = strings.Split(key, "::")
	)

	if len(sectionKey) >= 2 {
		section = sectionKey[0]
		k = sectionKey[1]
	} else {
		section = defaultSection
		k = sectionKey[0]
	}

	if _, ok := c.data[section]; !ok {
		c.data[section] = make(map[string]string)
	}
	c.data[section][k] = value
	return nil
}
+
+// DIY returns the raw value by a given key.
+func (c *IniConfigContainer) DIY(key string) (v interface{}, err error) {
+	if v, ok := c.data[strings.ToLower(key)]; ok {
+		return v, nil
+	}
+	return v, errors.New("key not find")
+}
+
+// section.key or key
+func (c *IniConfigContainer) getdata(key string) string {
+	if len(key) == 0 {
+		return ""
+	}
+	c.RLock()
+	defer c.RUnlock()
+
+	var (
+		section, k string
+		sectionKey = strings.Split(strings.ToLower(key), "::")
+	)
+	if len(sectionKey) >= 2 {
+		section = sectionKey[0]
+		k = sectionKey[1]
+	} else {
+		section = defaultSection
+		k = sectionKey[0]
+	}
+	if v, ok := c.data[section]; ok {
+		if vv, ok := v[k]; ok {
+			return vv
+		}
+	}
+	return ""
+}
+
// init registers the ini adapter so NewConfig("ini", ...) works after
// importing this package.
func init() {
	Register("ini", &IniConfig{})
}

+ 184 - 0
go/gopath/src/github.com/astaxie/beego/config/ini_test.go

@@ -0,0 +1,184 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+)
+
// TestIni writes a representative ini document to disk, parses it through
// the registered "ini" adapter, and checks every typed accessor against
// its expected value — including keys that do not exist — then verifies a
// Set/String round trip.
func TestIni(t *testing.T) {

	var (
		inicontext = `
;comment one
#comment two
appname = beeapi
httpport = 8080
mysqlport = 3600
PI = 3.1415976
runmode = "dev"
autorender = false
copyrequestbody = true
session= on
cookieon= off
newreg = OFF
needlogin = ON
enableSession = Y
enableCookie = N
flag = 1
[demo]
key1="asta"
key2 = "xie"
CaseInsensitive = true
peers = one;two;three
`

		// keyValue maps each key to its expected value; the Go type of the
		// value selects which accessor is exercised in the loop below.
		// Trailing entries expect zero values for keys absent from the file.
		keyValue = map[string]interface{}{
			"appname":               "beeapi",
			"httpport":              8080,
			"mysqlport":             int64(3600),
			"pi":                    3.1415976,
			"runmode":               "dev",
			"autorender":            false,
			"copyrequestbody":       true,
			"session":               true,
			"cookieon":              false,
			"newreg":                false,
			"needlogin":             true,
			"enableSession":         true,
			"enableCookie":          false,
			"flag":                  true,
			"demo::key1":            "asta",
			"demo::key2":            "xie",
			"demo::CaseInsensitive": true,
			"demo::peers":           []string{"one", "two", "three"},
			"null":                  "",
			"demo2::key1":           "",
			"error":                 "",
			"emptystrings":          []string{},
		}
	)

	f, err := os.Create("testini.conf")
	if err != nil {
		t.Fatal(err)
	}
	_, err = f.WriteString(inicontext)
	if err != nil {
		f.Close()
		t.Fatal(err)
	}
	f.Close()
	defer os.Remove("testini.conf")
	iniconf, err := NewConfig("ini", "testini.conf")
	if err != nil {
		t.Fatal(err)
	}
	for k, v := range keyValue {
		var err error
		var value interface{}
		// Dispatch on the expected type to pick the matching accessor.
		switch v.(type) {
		case int:
			value, err = iniconf.Int(k)
		case int64:
			value, err = iniconf.Int64(k)
		case float64:
			value, err = iniconf.Float(k)
		case bool:
			value, err = iniconf.Bool(k)
		case []string:
			value = iniconf.Strings(k)
		case string:
			value = iniconf.String(k)
		default:
			value, err = iniconf.DIY(k)
		}
		if err != nil {
			t.Fatalf("get key %q value fail,err %s", k, err)
		} else if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", value) {
			t.Fatalf("get key %q value, want %v got %v .", k, v, value)
		}

	}
	if err = iniconf.Set("name", "astaxie"); err != nil {
		t.Fatal(err)
	}
	if iniconf.String("name") != "astaxie" {
		t.Fatal("get name error")
	}

}
+
+func TestIniSave(t *testing.T) {
+
+	const (
+		inicontext = `
+app = app
+;comment one
+#comment two
+# comment three
+appname = beeapi
+httpport = 8080
+# DB Info
+# enable db
+[dbinfo]
+# db type name
+# suport mysql,sqlserver
+name = mysql
+`
+
+		saveResult = `
+app=app
+#comment one
+#comment two
+# comment three
+appname=beeapi
+httpport=8080
+
+# DB Info
+# enable db
+[dbinfo]
+# db type name
+# suport mysql,sqlserver
+name=mysql
+`
+	)
+	cfg, err := NewConfigData("ini", []byte(inicontext))
+	if err != nil {
+		t.Fatal(err)
+	}
+	name := "newIniConfig.ini"
+	if err := cfg.SaveConfigFile(name); err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(name)
+
+	if data, err := ioutil.ReadFile(name); err != nil {
+		t.Fatal(err)
+	} else {
+		cfgData := string(data)
+		datas := strings.Split(saveResult, "\n")
+		for _, line := range datas {
+			if strings.Contains(cfgData, line+"\n") == false {
+				t.Fatalf("different after save ini config file. need contains %q", line)
+			}
+		}
+
+	}
+}

+ 0 - 0
go/gopath/src/github.com/astaxie/beego/config/json.go


Некоторые файлы не были показаны из-за большого количества измененных файлов