Browse Source

上传

pull/1/head
chenyukun 1 year ago
parent
commit
4d296cb9ce
100 changed files with 18675 additions and 114 deletions
  1. +1
    -0
      .idea/inspectionProfiles/Project_Default.xml
  2. +8
    -1
      README.md
  3. +14
    -0
      config/aliyun.json
  4. +4
    -1
      config/application.json
  5. +2
    -2
      config/logger.json
  6. +9
    -7
      config/mqtt_config.json
  7. +10
    -0
      enums/ExceptionEnum.py
  8. +25
    -0
      enums/StatusEnum.py
  9. +2
    -3
      master.py
  10. +36
    -0
      osssdk/__init__.py
  11. +2980
    -0
      osssdk/api.py
  12. +704
    -0
      osssdk/auth.py
  13. +85
    -0
      osssdk/compat.py
  14. +188
    -0
      osssdk/crc64_combine.py
  15. +158
    -0
      osssdk/credentials.py
  16. +402
    -0
      osssdk/crypto.py
  17. +402
    -0
      osssdk/crypto_bucket.py
  18. +51
    -0
      osssdk/defaults.py
  19. +393
    -0
      osssdk/exceptions.py
  20. +85
    -0
      osssdk/headers.py
  21. +151
    -0
      osssdk/http.py
  22. +304
    -0
      osssdk/iterators.py
  23. +2748
    -0
      osssdk/models.py
  24. +1212
    -0
      osssdk/resumable.py
  25. +27
    -0
      osssdk/select_params.py
  26. +235
    -0
      osssdk/select_response.py
  27. +90
    -0
      osssdk/task_queue.py
  28. +1096
    -0
      osssdk/utils.py
  29. +2064
    -0
      osssdk/xml_utils.py
  30. +29
    -0
      pojo/Result.py
  31. +0
    -0
      pojo/__init__.py
  32. +1
    -1
      service/FeedbackThread.py
  33. +35
    -59
      service/PushStreamThread.py
  34. +82
    -40
      service/Service.py
  35. +269
    -0
      service/UploadFileProcess.py
  36. +45
    -0
      test/__init__.py
  37. +0
    -0
      test/image/__init__.py
  38. +17
    -0
      test/image/test.py
  39. +0
    -0
      test/ossdemo/__init__.py
  40. +58
    -0
      test/ossdemo/examples/async_fetch_task.py
  41. +66
    -0
      test/ossdemo/examples/async_process_object.py
  42. +134
    -0
      test/ossdemo/examples/bucket.py
  43. +35
    -0
      test/ossdemo/examples/bucket_access_monitor.py
  44. +55
    -0
      test/ossdemo/examples/bucket_callback_policy.py
  45. +63
    -0
      test/ossdemo/examples/bucket_cname.py
  46. +51
    -0
      test/ossdemo/examples/bucket_cors.py
  47. +129
    -0
      test/ossdemo/examples/bucket_inventory.py
  48. +46
    -0
      test/ossdemo/examples/bucket_logging.py
  49. +42
    -0
      test/ossdemo/examples/bucket_meta_query.py
  50. +58
    -0
      test/ossdemo/examples/bucket_policy.py
  51. +38
    -0
      test/ossdemo/examples/bucket_referer.py
  52. +65
    -0
      test/ossdemo/examples/bucket_replication.py
  53. +33
    -0
      test/ossdemo/examples/bucket_resource_group.py
  54. +44
    -0
      test/ossdemo/examples/bucket_style.py
  55. +40
    -0
      test/ossdemo/examples/bucket_symlink.py
  56. +50
    -0
      test/ossdemo/examples/bucket_tagging.py
  57. +36
    -0
      test/ossdemo/examples/bucket_transfer_acceleration.py
  58. +28
    -0
      test/ossdemo/examples/bucket_user_qos.py
  59. +42
    -0
      test/ossdemo/examples/bucket_versioning.py
  60. +126
    -0
      test/ossdemo/examples/bucket_website.py
  61. +50
    -0
      test/ossdemo/examples/bucket_worm.py
  62. +242
    -0
      test/ossdemo/examples/custom_crypto.py
  63. +96
    -0
      test/ossdemo/examples/download.py
  64. +18
    -0
      test/ossdemo/examples/environment_variable_credentials_provider.py
  65. BIN
      test/ossdemo/examples/example.jpg
  66. +108
    -0
      test/ossdemo/examples/image.py
  67. +111
    -0
      test/ossdemo/examples/live_channel.py
  68. +94
    -0
      test/ossdemo/examples/object_basic.py
  69. +89
    -0
      test/ossdemo/examples/object_callback.py
  70. +199
    -0
      test/ossdemo/examples/object_check.py
  71. +211
    -0
      test/ossdemo/examples/object_crypto.py
  72. +70
    -0
      test/ossdemo/examples/object_extra.py
  73. +140
    -0
      test/ossdemo/examples/object_forbid_overwrite.py
  74. +41
    -0
      test/ossdemo/examples/object_operation.py
  75. +197
    -0
      test/ossdemo/examples/object_post.py
  76. +132
    -0
      test/ossdemo/examples/object_progress.py
  77. +36
    -0
      test/ossdemo/examples/object_request_payment.py
  78. +51
    -0
      test/ossdemo/examples/object_restore.py
  79. +60
    -0
      test/ossdemo/examples/object_server_crypto.py
  80. +55
    -0
      test/ossdemo/examples/object_storage_type.py
  81. +463
    -0
      test/ossdemo/examples/object_tagging.py
  82. +71
    -0
      test/ossdemo/examples/qos_info.py
  83. +43
    -0
      test/ossdemo/examples/sdk_logging.py
  84. +53
    -0
      test/ossdemo/examples/select_csv.py
  85. +108
    -0
      test/ossdemo/examples/server_side_encryption.py
  86. +91
    -0
      test/ossdemo/examples/sign_v2.py
  87. +57
    -0
      test/ossdemo/examples/sign_v4.py
  88. +102
    -0
      test/ossdemo/examples/sts.py
  89. +63
    -0
      test/ossdemo/examples/traffic_limit.py
  90. +94
    -0
      test/ossdemo/examples/upload.py
  91. +48
    -0
      test/ossdemo/test.py
  92. +4
    -0
      test/ossdemo/tests/.gitattributes
  93. +0
    -0
      test/ossdemo/tests/__init__.py
  94. +261
    -0
      test/ossdemo/tests/common.py
  95. BIN
      test/ossdemo/tests/deprecated_encrypted_1MB_a_kms
  96. +8
    -0
      test/ossdemo/tests/deprecated_encrypted_1MB_a_meta_kms.json
  97. +6
    -0
      test/ossdemo/tests/deprecated_encrypted_1MB_a_meta_rsa.json
  98. BIN
      test/ossdemo/tests/deprecated_encrypted_1MB_a_rsa
  99. BIN
      test/ossdemo/tests/encrypted_cpp_example.jpg
  100. +0
    -0
      test/ossdemo/tests/encrypted_cpp_example_meta.json

+ 1
- 0
.idea/inspectionProfiles/Project_Default.xml View File

@@ -6,6 +6,7 @@
<list>
<option value="N803" />
<option value="N806" />
<option value="N802" />
</list>
</option>
</inspection_tool>

+ 8
- 1
README.md View File

@@ -17,4 +17,11 @@
"errorMsg": "异常描述",
"status": 0, //状态 0初始化 5待推流 10重试中 15推流中 20停止中 25完成 30超时 35失败
"current_time": "2023-07-04 11:20:20"
}
}

# 生产mqtt
url: tcp://mqtt.t-aaron.com:10883
#用户名
username: admin
#密码
password: admin##123

+ 14
- 0
config/aliyun.json View File

@@ -0,0 +1,14 @@
{
"access_key": "LTAI5tSJ62TLMUb4SZuf285A",
"access_secret": "MWYynm30filZ7x0HqSHlU3pdLVNeI7",
"oss": {
"endpoint": "http://oss-cn-shanghai.aliyuncs.com",
"bucket": "th-airprt-media",
"host_url": "https://th-airprt-media.oss-cn-shanghai.aliyuncs.com"
},
"vod": {
"host_address": "https://vod.play.t-aaron.com/",
"ecsRegionId": "cn-shanghai",
"cateId": 1000499327
}
}

+ 4
- 1
config/application.json View File

@@ -1,4 +1,7 @@
{
"pullUrl": "rtsp://localhost:8554/live",
"pushUrl": "rtmp://221.226.114.142:19350/rlive/stream_127?sign=dueyaUFe"
"pushUrl": "rtmp://192.168.10.101:19350/rlive/stream_122?sign=iKriFWqD",
"videoPath": "D:\\test\\video",
"imagePath": "D:\\test\\image",
"backup": "D:\\test\\backup"
}

+ 2
- 2
config/logger.json View File

@@ -1,8 +1,8 @@
{
"enable_file_log": 1,
"enable_stderr": 1,
"base_path": "../dsp/logs",
"log_name": "dsp.log",
"base_path": "logs",
"log_name": "airport_media.log",
"log_fmt": "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}",
"level": "INFO",
"rotation": "00:00",

+ 9
- 7
config/mqtt_config.json View File

@@ -1,10 +1,12 @@
{
"client_id": "THOBS@00000THJSQ232001",
"username": "",
"password": "",
"host": "127.0.0.1",
"port": 1883,
"client_id": "THOBS@0000THJSQ232003",
"username": "admin",
"password": "admin##123",
"host": "mqtt.t-aaron.com",
"port": 10883,
"keepalive": 60,
"sub_topic": "/v1/00000THJSQ232001/stream/push",
"res_topic": "/v1/00000THJSQ232001/stream/result"
"sub_topic": "/v1/0000THJSQ232003/stream/push",
"res_topic": "/v1/0000THJSQ232003/stream/result",
"sub_upload_topic": "/v1/0000THJSQ232003/media/upload",
"res_upload_topic": "/v1/0000THJSQ232003/media/result"
}

+ 10
- 0
enums/ExceptionEnum.py View File

@@ -15,5 +15,15 @@ class ExceptionType(Enum):

PUSH_STREAM_URL_IS_NULL = ("PS004", "推流地址不能为空!!!")

UPLOAD_TASK_IS_AREADLY = ("PS005", "上传任务已存在!!!")

GET_VIDEO_URL_EXCEPTION = ("PS006", "获取视频地址失败!!!")

UPLOAD_VIDEO_URL_EXCEPTION = ("PS007", "上传视频失败!!!")

GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("PS008", "获取视频地址超时!!!")

TASK_TIMEOUT_EXCEPTION = ("PS009", "文件上传任务执行超时!!!")

SERVICE_INNER_EXCEPTION = ("PS999", "系统内部异常!!!")


+ 25
- 0
enums/StatusEnum.py View File

@@ -20,6 +20,31 @@ class StatusType(Enum):

FAILED = (35, "失败")

@unique
class UploadStatusType(Enum):

WAITING = (5, "待上传")

RUNNING = (10, "上传中")

SUCCESS = (15, "完成")

FAILED = (20, "失败")


@unique
class UploadTaskStatusType(Enum):

WAITING = (5, "待执行")

RUNNING = (10, "执行中")

SUCCESS = (15, "完成")

TIMEOUT = (20, "超时")

FAILED = (25, "失败")





+ 2
- 3
master.py View File

@@ -1,5 +1,4 @@


# -*- coding: utf-8 -*-
from os.path import dirname, realpath
from loguru import logger

@@ -13,7 +12,7 @@ from util.LogUtils import init_log
if __name__ == '__main__':
base_dir = dirname(realpath(__file__))
init_log(base_dir)
logger.info("(♥◠‿◠)ノ゙ 【推流服务】开始启动 ლ(´ڡ`ლ)゙")
logger.info("(♥◠‿◠)ノ゙ 【机场媒体服务】开始启动 ლ(´ڡ`ლ)゙")
# arg = argv
# logger.info("脚本启动参数: {}", arg)
DispatcherService(base_dir)

+ 36
- 0
osssdk/__init__.py View File

@@ -0,0 +1,36 @@
__version__ = '2.18.0'

from . import models, exceptions, defaults

from .api import Service, Bucket
from .auth import Auth, AuthV2, AuthV4, AnonymousAuth, StsAuth, AUTH_VERSION_1, AUTH_VERSION_2, AUTH_VERSION_4, \
make_auth, ProviderAuth, ProviderAuthV2, ProviderAuthV4
from .http import Session, CaseInsensitiveDict
from .credentials import EcsRamRoleCredentialsProvider, EcsRamRoleCredential, CredentialsProvider, \
StaticCredentialsProvider

from .iterators import (BucketIterator, ObjectIterator, ObjectIteratorV2,
MultipartUploadIterator, ObjectUploadIterator,
PartIterator, LiveChannelIterator)

from .resumable import resumable_upload, resumable_download, ResumableStore, ResumableDownloadStore, determine_part_size
from .resumable import make_upload_store, make_download_store

from .compat import to_bytes, to_string, to_unicode, urlparse, urlquote, urlunquote

from .utils import SizedFileAdapter, make_progress_adapter
from .utils import content_type_by_name, is_valid_bucket_name, is_valid_endpoint
from .utils import http_date, http_to_unixtime, iso8601_to_unixtime, date_to_iso8601, iso8601_to_date

from .models import BUCKET_ACL_PRIVATE, BUCKET_ACL_PUBLIC_READ, BUCKET_ACL_PUBLIC_READ_WRITE
from .models import SERVER_SIDE_ENCRYPTION_AES256, SERVER_SIDE_ENCRYPTION_KMS, SERVER_SIDE_ENCRYPTION_SM4, \
KMS_DATA_ENCRYPTION_SM4
from .models import OBJECT_ACL_DEFAULT, OBJECT_ACL_PRIVATE, OBJECT_ACL_PUBLIC_READ, OBJECT_ACL_PUBLIC_READ_WRITE
from .models import BUCKET_STORAGE_CLASS_STANDARD, BUCKET_STORAGE_CLASS_IA, BUCKET_STORAGE_CLASS_ARCHIVE, \
BUCKET_STORAGE_CLASS_COLD_ARCHIVE
from .models import BUCKET_VERSIONING_ENABLE, BUCKET_VERSIONING_SUSPEND
from .models import BUCKET_DATA_REDUNDANCY_TYPE_LRS, BUCKET_DATA_REDUNDANCY_TYPE_ZRS

from .crypto import LocalRsaProvider, AliKMSProvider, RsaProvider, EncryptionMaterials
from .crypto_bucket import CryptoBucket


+ 2980
- 0
osssdk/api.py
File diff suppressed because it is too large
View File


+ 704
- 0
osssdk/auth.py View File

@@ -0,0 +1,704 @@
# -*- coding: utf-8 -*-

import hmac
import hashlib
import time
from datetime import datetime

from loguru import logger

from . import utils
from .exceptions import ClientError
from .compat import urlquote, to_bytes, is_py2
from .headers import *
from .credentials import StaticCredentialsProvider

AUTH_VERSION_1 = 'v1'
AUTH_VERSION_2 = 'v2'
AUTH_VERSION_4 = 'v4'
DEFAULT_SIGNED_HEADERS = ['content-type', 'content-md5']


def make_auth(access_key_id, access_key_secret, auth_version=AUTH_VERSION_1):
    """Build an auth object for the requested signature version.

    :param access_key_id: AccessKeyId (surrounding whitespace is stripped)
    :param access_key_secret: AccessKeySecret (surrounding whitespace is stripped)
    :param auth_version: one of AUTH_VERSION_1 / AUTH_VERSION_2 / AUTH_VERSION_4;
        any other value falls back to the version-1 signer
    :return: an :class:`Auth`, :class:`AuthV2` or :class:`AuthV4` instance
    """
    key_id = access_key_id.strip()
    key_secret = access_key_secret.strip()
    if auth_version == AUTH_VERSION_2:
        logger.debug("Init Auth V2: access_key_id: {}, access_key_secret: ******", access_key_id)
        return AuthV2(key_id, key_secret)
    elif auth_version == AUTH_VERSION_4:
        logger.debug("Init Auth V4: access_key_id: {}, access_key_secret: ******", access_key_id)
        return AuthV4(key_id, key_secret)
    logger.debug("Init Auth v1: access_key_id: {}, access_key_secret: ******", access_key_id)
    return Auth(key_id, key_secret)


class AuthBase(object):
    """Holds the caller's AccessKeyId/AccessKeySecret (behind a credentials
    provider) and implements the signing primitives shared by all versions."""

    __slots__ = "credentials_provider"

    def __init__(self, credentials_provider):
        # The provider is queried on every signing call, so rotating
        # credentials (e.g. STS) are picked up automatically.
        self.credentials_provider = credentials_provider

    def _sign_rtmp_url(self, url, bucket_name, channel_name, expires, params):
        """Return `url` with a signed query string for RTMP publishing.

        :param url: base RTMP url
        :param bucket_name: bucket name
        :param channel_name: live channel name
        :param int expires: lifetime of the signature, in seconds
        :param dict params: extra query parameters; mutated in place to carry
            the security token and the signature fields
        """
        credentials = self.credentials_provider.get_credentials()
        if credentials.get_security_token():
            params['security-token'] = credentials.get_security_token()

        expiration_time = int(time.time()) + expires

        canonicalized_resource = "/%s/%s" % (bucket_name, channel_name)
        canonicalized_params = []

        if params:
            items = params.items()
            for k, v in items:
                # Parameters that carry the signature itself are excluded
                # from the string to sign.
                if k != "OSSAccessKeyId" and k != "Signature" and k != "Expires" and k != "SecurityToken":
                    canonicalized_params.append((k, v))

        canonicalized_params.sort(key=lambda e: e[0])
        canon_params_str = ''
        for k, v in canonicalized_params:
            canon_params_str += '%s:%s\n' % (k, v)

        p = params if params else {}
        string_to_sign = str(expiration_time) + "\n" + canon_params_str + canonicalized_resource
        logger.debug('Sign Rtmp url: string to be signed = {}', string_to_sign)

        # HMAC-SHA1 over the canonical string, base64-encoded.
        h = hmac.new(to_bytes(credentials.get_access_key_secret()), to_bytes(string_to_sign), hashlib.sha1)
        signature = utils.b64encode_as_string(h.digest())

        p['OSSAccessKeyId'] = credentials.get_access_key_id()
        p['Expires'] = str(expiration_time)
        p['Signature'] = signature

        return url + '?' + '&'.join(_param_to_quoted_query(k, v) for k, v in p.items())


class ProviderAuth(AuthBase):
    """Signature version 1.

    Same constructor as the parent AuthBase: pass a credentials_provider.
    """
    # Query parameters that count as OSS sub-resources and therefore
    # participate in the canonicalized resource string.
    _subresource_key_set = frozenset(
        ['response-content-type', 'response-content-language',
         'response-cache-control', 'logging', 'response-content-encoding',
         'acl', 'uploadId', 'uploads', 'partNumber', 'group', 'link',
         'delete', 'website', 'location', 'objectInfo', 'objectMeta',
         'response-expires', 'response-content-disposition', 'cors', 'lifecycle',
         'restore', 'qos', 'referer', 'stat', 'bucketInfo', 'append', 'position', 'security-token',
         'live', 'comp', 'status', 'vod', 'startTime', 'endTime', 'x-oss-process',
         'symlink', 'callback', 'callback-var', 'tagging', 'encryption', 'versions',
         'versioning', 'versionId', 'policy', 'requestPayment', 'x-oss-traffic-limit', 'qosInfo', 'asyncFetch',
         'x-oss-request-payer', 'sequential', 'inventory', 'inventoryId', 'continuation-token', 'callback',
         'callback-var', 'worm', 'wormId', 'wormExtend', 'replication', 'replicationLocation',
         'replicationProgress', 'transferAcceleration', 'cname', 'metaQuery',
         'x-oss-ac-source-ip', 'x-oss-ac-subnet-mask', 'x-oss-ac-vpc-id', 'x-oss-ac-forward-allow',
         'resourceGroup', 'style', 'styleName', 'x-oss-async-process']
    )

    def _sign_request(self, req, bucket_name, key):
        """Add a V1 `authorization` header (and STS token header, if any) to `req`."""
        credentials = self.credentials_provider.get_credentials()
        if credentials.get_security_token():
            req.headers[OSS_SECURITY_TOKEN] = credentials.get_security_token()

        req.headers['date'] = utils.http_date()

        signature = self.__make_signature(req, bucket_name, key, credentials)
        req.headers['authorization'] = "OSS {0}:{1}".format(credentials.get_access_key_id(), signature)

    def _sign_url(self, req, bucket_name, key, expires):
        """Return a presigned URL that expires in `expires` seconds."""
        credentials = self.credentials_provider.get_credentials()
        if credentials.get_security_token():
            req.params['security-token'] = credentials.get_security_token()

        expiration_time = int(time.time()) + expires

        # For URL signing, the 'date' slot of the string-to-sign holds the
        # absolute expiration timestamp instead of the request date.
        req.headers['date'] = str(expiration_time)
        signature = self.__make_signature(req, bucket_name, key, credentials)

        req.params['OSSAccessKeyId'] = credentials.get_access_key_id()
        req.params['Expires'] = str(expiration_time)
        req.params['Signature'] = signature

        return req.url + '?' + '&'.join(_param_to_quoted_query(k, v) for k, v in req.params.items())

    def __make_signature(self, req, bucket_name, key, credentials):
        # Python 2 signs over str, Python 3 over bytes; both feed HMAC-SHA1.
        if is_py2:
            string_to_sign = self.__get_string_to_sign(req, bucket_name, key)
        else:
            string_to_sign = self.__get_bytes_to_sign(req, bucket_name, key)

        logger.debug('Make signature: string to be signed = {}', string_to_sign)

        h = hmac.new(to_bytes(credentials.get_access_key_secret()), to_bytes(string_to_sign), hashlib.sha1)
        return utils.b64encode_as_string(h.digest())

    def __get_string_to_sign(self, req, bucket_name, key):
        # str-based variant of the canonical string (Python 2 path).
        resource_string = self.__get_resource_string(req, bucket_name, key)
        headers_string = self.__get_headers_string(req)

        content_md5 = req.headers.get('content-md5', '')
        content_type = req.headers.get('content-type', '')
        date = req.headers.get('x-oss-date', '') or req.headers.get('date', '')
        return '\n'.join([req.method,
                          content_md5,
                          content_type,
                          date,
                          headers_string + resource_string])

    def __get_headers_string(self, req):
        # Canonicalize the x-oss-* headers: lowercase keys, sorted, one per line.
        headers = req.headers
        canon_headers = []
        for k, v in headers.items():
            lower_key = k.lower()
            if lower_key.startswith('x-oss-'):
                canon_headers.append((lower_key, v))

        canon_headers.sort(key=lambda x: x[0])

        if canon_headers:
            return '\n'.join(k + ':' + v for k, v in canon_headers) + '\n'
        else:
            return ''

    def __get_resource_string(self, req, bucket_name, key):
        # Service-level requests have no bucket; sign '/' plus sub-resources.
        if not bucket_name:
            return '/' + self.__get_subresource_string(req.params)
        else:
            return '/{0}/{1}{2}'.format(bucket_name, key, self.__get_subresource_string(req.params))

    def __get_subresource_string(self, params):
        # Only parameters in _subresource_key_set are part of the resource.
        if not params:
            return ''

        subresource_params = []
        for key, value in params.items():
            if key in self._subresource_key_set:
                subresource_params.append((key, value))

        subresource_params.sort(key=lambda e: e[0])

        if subresource_params:
            return '?' + '&'.join(self.__param_to_query(k, v) for k, v in subresource_params)
        else:
            return ''

    def __param_to_query(self, k, v):
        # Valueless sub-resources (e.g. 'acl') appear as the bare key.
        if v:
            return k + '=' + v
        else:
            return k

    def __get_bytes_to_sign(self, req, bucket_name, key):
        # bytes-based variant of the canonical string (Python 3 path);
        # mirrors __get_string_to_sign exactly.
        resource_bytes = self.__get_resource_string(req, bucket_name, key).encode('utf-8')
        headers_bytes = self.__get_headers_bytes(req)

        content_md5 = req.headers.get('content-md5', '').encode('utf-8')
        content_type = req.headers.get('content-type', '').encode('utf-8')
        date = req.headers.get('x-oss-date', '').encode('utf-8') or req.headers.get('date', '').encode('utf-8')
        return b'\n'.join([req.method.encode('utf-8'),
                           content_md5,
                           content_type,
                           date,
                           headers_bytes + resource_bytes])

    def __get_headers_bytes(self, req):
        # bytes twin of __get_headers_string.
        headers = req.headers
        canon_headers = []
        for k, v in headers.items():
            lower_key = k.lower()
            if lower_key.startswith('x-oss-'):
                canon_headers.append((lower_key, v))

        canon_headers.sort(key=lambda x: x[0])

        if canon_headers:
            return b'\n'.join(to_bytes(k) + b':' + to_bytes(v) for k, v in canon_headers) + b'\n'
        else:
            return b''


class Auth(ProviderAuth):
    """Signature version 1 backed by a fixed AccessKeyId/AccessKeySecret pair."""

    def __init__(self, access_key_id, access_key_secret):
        # Wrap the static key pair in a provider so the shared signing
        # machinery of ProviderAuth can be reused unchanged.
        provider = StaticCredentialsProvider(access_key_id.strip(), access_key_secret.strip())
        super(Auth, self).__init__(provider)


class AnonymousAuth(object):
    """Anonymous access: nothing is signed.

    .. note::
        Anonymous callers can only read public-read buckets, or read and
        write public-read-write buckets.  Service- and bucket-level
        operations (including listing objects) are not available.
    """

    def _sign_request(self, req, bucket_name, key):
        # No credentials, so the request is sent as-is.
        pass

    def _sign_url(self, req, bucket_name, key, expires):
        query = '&'.join(_param_to_quoted_query(k, v) for k, v in req.params.items())
        return req.url + '?' + query

    def _sign_rtmp_url(self, url, bucket_name, channel_name, expires, params):
        query = '&'.join(_param_to_quoted_query(k, v) for k, v in params.items())
        return url + '?' + query


class StsAuth(object):
    """Authentication with temporary STS credentials.

    Temporary keys (AccessKeyId/AccessKeySecret) and a SecurityToken are
    obtained from the official STS client.  The credentials expire after a
    while; before that happens, fetch a fresh set and replace the bucket's
    `auth` member with a new `StsAuth` instance.

    :param str access_key_id: temporary AccessKeyId
    :param str access_key_secret: temporary AccessKeySecret
    :param str security_token: temporary SecurityToken
    :param str auth_version: signature version to use, AUTH_VERSION_1 (v1) by default
    """

    def __init__(self, access_key_id, access_key_secret, security_token, auth_version=AUTH_VERSION_1):
        logger.debug(
            "Init StsAuth: access_key_id: {}, access_key_secret: ******, security_token: ******", access_key_id)
        provider = StaticCredentialsProvider(access_key_id, access_key_secret, security_token)

        # Delegate all signing to the provider-based implementation for
        # the chosen version; anything unrecognized falls back to v1.
        if auth_version == AUTH_VERSION_2:
            delegate = ProviderAuthV2(provider)
        elif auth_version == AUTH_VERSION_4:
            delegate = ProviderAuthV4(provider)
        else:
            delegate = ProviderAuth(provider)
        self.__auth = delegate

    def _sign_request(self, req, bucket_name, key):
        self.__auth._sign_request(req, bucket_name, key)

    def _sign_url(self, req, bucket_name, key, expires):
        return self.__auth._sign_url(req, bucket_name, key, expires)

    def _sign_rtmp_url(self, url, bucket_name, channel_name, expires, params):
        return self.__auth._sign_rtmp_url(url, bucket_name, channel_name, expires, params)


def _param_to_quoted_query(k, v):
    """Render one query parameter as ``key=value`` — or the bare key when
    the value is empty — percent-encoding both parts."""
    quoted_key = urlquote(k, '')
    return quoted_key + '=' + urlquote(v, '') if v else quoted_key


def v2_uri_encode(raw_text):
    """Percent-encode `raw_text` per the OSS V2 signing rules.

    ASCII letters, digits and ``_ - ~ .`` pass through unchanged; every
    other byte becomes an uppercase ``%XX`` escape.
    """
    encoded_chunks = []
    for byte in to_bytes(raw_text):
        # Python 3 yields ints when iterating bytes; Python 2 yields str.
        char = chr(byte) if isinstance(byte, int) else byte
        if 'A' <= char <= 'Z' or 'a' <= char <= 'z' \
                or '0' <= char <= '9' or char in '_-~.':
            encoded_chunks.append(char)
        else:
            encoded_chunks.append("%{0:02X}".format(ord(char)))
    return ''.join(encoded_chunks)


# Headers that the V2 signer includes in the signature by default
# whenever they are present on the request.
_DEFAULT_ADDITIONAL_HEADERS = set(['range',
                                   'if-modified-since'])


class ProviderAuthV2(AuthBase):
    """Signature version 2; same constructor as the parent AuthBase
    (pass a credentials_provider).

    Differences from version 1:
    1. Uses SHA256, which is stronger.
    2. The signature covers every HTTP query parameter.
    """

    def _sign_request(self, req, bucket_name, key, in_additional_headers=None):
        """Put the authorization into the request headers.

        :param req: the request whose headers receive the authorization
        :type req: oss2.http.Request

        :param bucket_name: bucket name
        :param key: OSS object key
        :param in_additional_headers: extra headers to include in the signature
        """
        credentials = self.credentials_provider.get_credentials()
        if credentials.get_security_token():
            req.headers[OSS_SECURITY_TOKEN] = credentials.get_security_token()

        if in_additional_headers is None:
            in_additional_headers = _DEFAULT_ADDITIONAL_HEADERS

        additional_headers = self.__get_additional_headers(req, in_additional_headers)

        req.headers['date'] = utils.http_date()

        signature = self.__make_signature(req, bucket_name, key, additional_headers, credentials)

        # The AdditionalHeaders clause is only emitted when headers beyond
        # the defaults actually took part in the signature.
        if additional_headers:
            req.headers['authorization'] = "OSS2 AccessKeyId:{0},AdditionalHeaders:{1},Signature:{2}" \
                .format(credentials.get_access_key_id(), ';'.join(additional_headers), signature)
        else:
            req.headers['authorization'] = "OSS2 AccessKeyId:{0},Signature:{1}".format(credentials.get_access_key_id(),
                                                                                       signature)

    def _sign_url(self, req, bucket_name, key, expires, in_additional_headers=None):
        """Return a signed URL.

        :param req: the request to sign
        :type req: oss2.http.Request

        :param bucket_name: bucket name
        :param key: OSS object key
        :param int expires: the returned url expires after `expires` seconds
        :param in_additional_headers: extra headers to include in the signature

        :return: a signed URL
        """
        credentials = self.credentials_provider.get_credentials()
        if credentials.get_security_token():
            req.params['security-token'] = credentials.get_security_token()

        if in_additional_headers is None:
            in_additional_headers = set()

        additional_headers = self.__get_additional_headers(req, in_additional_headers)

        expiration_time = int(time.time()) + expires

        req.headers['date'] = str(expiration_time)  # re-use __make_signature by setting the 'date' header

        req.params['x-oss-signature-version'] = 'OSS2'
        req.params['x-oss-expires'] = str(expiration_time)
        req.params['x-oss-access-key-id'] = credentials.get_access_key_id()

        signature = self.__make_signature(req, bucket_name, key, additional_headers, credentials)

        req.params['x-oss-signature'] = signature

        return req.url + '?' + '&'.join(_param_to_quoted_query(k, v) for k, v in req.params.items())

    def __make_signature(self, req, bucket_name, key, additional_headers, credentials):
        # Python 2 signs over str, Python 3 over bytes; both feed HMAC-SHA256.
        if is_py2:
            string_to_sign = self.__get_string_to_sign(req, bucket_name, key, additional_headers)
        else:
            string_to_sign = self.__get_bytes_to_sign(req, bucket_name, key, additional_headers)

        logger.debug('Make signature: string to be signed = {}', string_to_sign)

        h = hmac.new(to_bytes(credentials.get_access_key_secret()), to_bytes(string_to_sign), hashlib.sha256)
        return utils.b64encode_as_string(h.digest())

    def __get_additional_headers(self, req, in_additional_headers):
        # we add a header into additional_headers only if it is already in req's headers.

        additional_headers = set(h.lower() for h in in_additional_headers)
        keys_in_header = set(k.lower() for k in req.headers.keys())

        return additional_headers & keys_in_header

    def __get_string_to_sign(self, req, bucket_name, key, additional_header_list):
        # str-based variant of the canonical string (Python 2 path).
        verb = req.method
        content_md5 = req.headers.get('content-md5', '')
        content_type = req.headers.get('content-type', '')
        date = req.headers.get('date', '')

        canonicalized_oss_headers = self.__get_canonicalized_oss_headers(req, additional_header_list)
        additional_headers = ';'.join(sorted(additional_header_list))
        canonicalized_resource = self.__get_resource_string(req, bucket_name, key)

        return verb + '\n' + \
            content_md5 + '\n' + \
            content_type + '\n' + \
            date + '\n' + \
            canonicalized_oss_headers + \
            additional_headers + '\n' + \
            canonicalized_resource

    def __get_resource_string(self, req, bucket_name, key):
        # Unlike V1, the whole URI is percent-encoded with v2_uri_encode.
        if bucket_name:
            encoded_uri = v2_uri_encode('/' + bucket_name + '/' + key)
        else:
            encoded_uri = v2_uri_encode('/')

        logger.info('encoded_uri={} key={}', encoded_uri, key)

        return encoded_uri + self.__get_canonalized_query_string(req)

    def __get_canonalized_query_string(self, req):
        # V2 signs ALL query parameters, sorted by encoded key.
        encoded_params = {}
        for param, value in req.params.items():
            encoded_params[v2_uri_encode(param)] = v2_uri_encode(value)

        if not encoded_params:
            return ''

        sorted_params = sorted(encoded_params.items(), key=lambda e: e[0])
        return '?' + '&'.join(self.__param_to_query(k, v) for k, v in sorted_params)

    def __param_to_query(self, k, v):
        # Valueless parameters appear as the bare key.
        if v:
            return k + '=' + v
        else:
            return k

    def __get_canonicalized_oss_headers(self, req, additional_headers):
        """
        :param additional_headers: lower-cased header names, none of which
            start with the 'x-oss-' prefix.
        """
        canon_headers = []

        for k, v in req.headers.items():
            lower_key = k.lower()
            if lower_key.startswith('x-oss-') or lower_key in additional_headers:
                canon_headers.append((lower_key, v))

        canon_headers.sort(key=lambda x: x[0])

        return ''.join(v[0] + ':' + v[1] + '\n' for v in canon_headers)

    def __get_bytes_to_sign(self, req, bucket_name, key, additional_header_list):
        # bytes-based variant of the canonical string (Python 3 path);
        # mirrors __get_string_to_sign exactly.
        verb = req.method.encode('utf-8')
        content_md5 = req.headers.get('content-md5', '').encode('utf-8')
        content_type = req.headers.get('content-type', '').encode('utf-8')
        date = req.headers.get('date', '').encode('utf-8')

        canonicalized_oss_headers = self.__get_canonicalized_oss_headers_bytes(req, additional_header_list)
        additional_headers = ';'.join(sorted(additional_header_list)).encode('utf-8')
        canonicalized_resource = self.__get_resource_string(req, bucket_name, key).encode('utf-8')

        return verb + b'\n' + \
            content_md5 + b'\n' + \
            content_type + b'\n' + \
            date + b'\n' + \
            canonicalized_oss_headers + \
            additional_headers + b'\n' + \
            canonicalized_resource

    def __get_canonicalized_oss_headers_bytes(self, req, additional_headers):
        """
        :param additional_headers: lower-cased header names, none of which
            start with the 'x-oss-' prefix.
        """
        canon_headers = []

        for k, v in req.headers.items():
            lower_key = k.lower()
            if lower_key.startswith('x-oss-') or lower_key in additional_headers:
                canon_headers.append((lower_key, v))

        canon_headers.sort(key=lambda x: x[0])

        return b''.join(to_bytes(v[0]) + b':' + to_bytes(v[1]) + b'\n' for v in canon_headers)


class AuthV2(ProviderAuthV2):
    """Signature version 2 backed by a fixed AccessKeyId/AccessKeySecret pair.

    Differences from version 1:
    1. Uses SHA256, which is stronger.
    2. The signature covers every HTTP query parameter.
    """

    def __init__(self, access_key_id, access_key_secret):
        # Wrap the static key pair in a provider so the shared V2 signing
        # machinery can be reused unchanged.
        provider = StaticCredentialsProvider(access_key_id.strip(), access_key_secret.strip())
        super(AuthV2, self).__init__(provider)


class ProviderAuthV4(AuthBase):
"""签名版本4,默认构造函数同父类AuthBase,需要传递credentials_provider
与版本2的区别在:
1. v4 签名规则引入了scope概念,SignToString(待签名串) 和 SigningKey (签名密钥)都需要包含 region信息
2. 资源路径里的 / 不做转义。 query里的 / 需要转义为 %2F
"""

def _sign_request(self, req, bucket_name, key, in_additional_headers=None):
    """Put the V4 authorization into the request headers.

    :param req: the request whose headers receive the authorization
    :type req: oss2.http.Request

    :param bucket_name: bucket name
    :param key: OSS object key
    :param in_additional_headers: extra headers to include in the signature
    :raises ClientError: if `req.region` is None — V4 scopes the
        signature by region, so it is mandatory
    """
    if req.region is None:
        raise ClientError('The region should not be None in signature version 4.')

    credentials = self.credentials_provider.get_credentials()
    if credentials.get_security_token():
        req.headers[OSS_SECURITY_TOKEN] = credentials.get_security_token()

    # V4 uses an ISO8601 basic timestamp (yyyymmddThhmmssZ, UTC) both as a
    # header and, truncated to the date, inside the credential scope.
    now_datetime = datetime.utcnow()
    now_datetime_iso8601 = now_datetime.strftime("%Y%m%dT%H%M%SZ")
    now_date = now_datetime_iso8601[:8]
    req.headers['x-oss-date'] = now_datetime_iso8601
    # The request body is not hashed; the fixed V4 literal is signed instead.
    req.headers['x-oss-content-sha256'] = 'UNSIGNED-PAYLOAD'

    additional_signed_headers = self.__get_additional_signed_headers(in_additional_headers)
    credential = credentials.get_access_key_id() + "/" + self.__get_scope(now_date, req)
    signature = self.__make_signature(req, bucket_name, key, additional_signed_headers, credentials)

    authorization = 'OSS4-HMAC-SHA256 Credential={0}, Signature={1}'.format(credential, signature)
    if additional_signed_headers:
        authorization = authorization + ', AdditionalHeaders={0}'.format(';'.join(additional_signed_headers))

    req.headers['authorization'] = authorization

def _sign_url(self, req, bucket_name, key, expires, in_additional_headers=None):
"""返回一个签过名的URL

:param req: 需要签名的请求
:type req: oss2.http.Request

:param bucket_name: bucket名称
:param key: OSS文件名
:param int expires: 返回的url将在`expires`秒后过期.
:param in_additional_headers: 加入签名计算的额外header列表

:return: a signed URL
"""
raise ClientError("sign_url is not support in signature version 4.")

def __make_signature(self, req, bucket_name, key, additional_signed_headers, credentials):
canonical_request = self.__get_canonical_request(req, bucket_name, key, additional_signed_headers)
string_to_sign = self.__get_string_to_sign(req, canonical_request)
signing_key = self.__get_signing_key(req, credentials)
signature = hmac.new(signing_key, to_bytes(string_to_sign), hashlib.sha256).hexdigest()
# print("canonical_request:\n" + canonical_request)
# print("string_to_sign:\n" + string_to_sign)
logger.debug('Make signature: canonical_request = {}', canonical_request)
logger.debug('Make signature: string to be signed = {}', string_to_sign)
return signature

def __get_additional_signed_headers(self, in_additional_headers):
if in_additional_headers is None:
return None
headers = []
for k in in_additional_headers:
key = k.lower()
if not (key.startswith('x-oss-') or DEFAULT_SIGNED_HEADERS.__contains__(key)):
headers.append(key)
headers.sort(key=lambda x: x[0])
return headers

def __get_canonical_uri(self, bucket_name, key):
if bucket_name:
encoded_uri = '/' + bucket_name + '/' + key
else:
encoded_uri = '/'
return self.__v4_uri_encode(encoded_uri, True)

def __param_to_query(self, k, v):
if v:
return k + '=' + v
else:
return k

def __get_canonical_query(self, req):
encoded_params = {}
for param, value in req.params.items():
encoded_params[self.__v4_uri_encode(param, False)] = self.__v4_uri_encode(value, False)

if not encoded_params:
return ''

sorted_params = sorted(encoded_params.items(), key=lambda e: e[0])
return '&'.join(self.__param_to_query(k, v) for k, v in sorted_params)

def __is_sign_header(self, key, additional_headers):
if key is not None:
if key.startswith('x-oss-'):
return True

if DEFAULT_SIGNED_HEADERS.__contains__(key):
return True

if additional_headers is not None and additional_headers.__contains__(key):
return True

return False

def __get_canonical_headers(self, req, additional_headers):
canon_headers = []
for k, v in req.headers.items():
lower_key = k.lower()
if self.__is_sign_header(lower_key, additional_headers):
canon_headers.append((lower_key, v))
canon_headers.sort(key=lambda x: x[0])
return ''.join(v[0] + ':' + v[1] + '\n' for v in canon_headers)

def __get_canonical_additional_signed_headers(self, additional_headers):
if additional_headers is None:
return ''
return ';'.join(sorted(additional_headers))

    def __get_canonical_hash_payload(self, req):
        """Return the payload-hash component of the canonical request.

        Uses the caller-provided x-oss-content-sha256 header when present,
        otherwise the unsigned-payload placeholder.
        """
        if req.headers.__contains__('x-oss-content-sha256'):
            return req.headers.get('x-oss-content-sha256', '')
        # NOTE(review): 'UNSIGNED-PARYLOAD' looks like a typo for
        # 'UNSIGNED-PAYLOAD', but the same literal ships in the upstream
        # aliyun oss2 SDK, and this string is part of what gets signed —
        # do not "fix" it without verifying against the OSS service.
        return 'UNSIGNED-PARYLOAD'

    def __get_region(self, req):
        # The cloud-box id takes precedence over the plain region when present.
        return req.cloudbox_id or req.region

    def __get_product(self, req):
        return req.product

    def __get_scope(self, date, req):
        """Build the V4 credential scope: '<date>/<region>/<product>/aliyun_v4_request'."""
        return date + "/" + self.__get_region(req) + "/" + self.__get_product(req) + "/aliyun_v4_request"

def __get_canonical_request(self, req, bucket_name, key, additional_signed_headers):
return req.method + '\n' + \
self.__get_canonical_uri(bucket_name, key) + '\n' + \
self.__get_canonical_query(req) + '\n' + \
self.__get_canonical_headers(req, additional_signed_headers) + '\n' + \
self.__get_canonical_additional_signed_headers(additional_signed_headers) + '\n' + \
self.__get_canonical_hash_payload(req)

def __get_string_to_sign(self, req, canonical_request):
datetime = req.headers.get('x-oss-date', '')
date = datetime[:8]
return 'OSS4-HMAC-SHA256' + '\n' + \
datetime + '\n' + \
self.__get_scope(date, req) + '\n' + \
hashlib.sha256(to_bytes(canonical_request)).hexdigest()

def __get_signing_key(self, req, credentials):
date = req.headers.get('x-oss-date', '')[:8]
key_secret = 'aliyun_v4' + credentials.get_access_key_secret()
signing_date = hmac.new(to_bytes(key_secret), to_bytes(date), hashlib.sha256)
signing_region = hmac.new(signing_date.digest(), to_bytes(self.__get_region(req)), hashlib.sha256)
signing_product = hmac.new(signing_region.digest(), to_bytes(self.__get_product(req)), hashlib.sha256)
signing_key = hmac.new(signing_product.digest(), to_bytes('aliyun_v4_request'), hashlib.sha256)
return signing_key.digest()

def __v4_uri_encode(self, raw_text, ignoreSlashes):
raw_text = to_bytes(raw_text)

res = ''
for b in raw_text:
if isinstance(b, int):
c = chr(b)
else:
c = b

if (c >= 'A' and c <= 'Z') or (c >= 'a' and c <= 'z') \
or (c >= '0' and c <= '9') or c in ['_', '-', '~', '.']:
res += c
elif ignoreSlashes is True and c == '/':
res += c
else:
res += "%{0:02X}".format(ord(c))

return res


class AuthV4(ProviderAuthV4):
    """Signature version 4 with statically configured credentials.

    Differences from version 2:
    1. V4 introduces a credential scope: both the string-to-sign and the
       signing key incorporate region information.
    2. '/' in the resource path is not escaped, while '/' inside query
       values is escaped as %2F.
    """

    def __init__(self, access_key_id, access_key_secret):
        # Wrap the raw key pair in a static provider; keys are stripped
        # because they are frequently pasted with stray whitespace.
        credentials_provider = StaticCredentialsProvider(access_key_id.strip(), access_key_secret.strip())
        super(AuthV4, self).__init__(credentials_provider)

+ 85
- 0
osssdk/compat.py View File

@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-

"""
兼容Python版本
"""

import sys

is_py2 = (sys.version_info[0] == 2)  # running under Python 2.x
is_py3 = (sys.version_info[0] == 3)  # running under Python 3.x
is_py33 = (sys.version_info[0] == 3 and sys.version_info[1] == 3)  # exactly Python 3.3


# Prefer simplejson when installed; fall back to the standard-library json.
try:
    import simplejson as json
except (ImportError, SyntaxError):
    import json


if is_py2:
    from urllib import quote as urlquote, unquote as urlunquote
    from urlparse import urlparse, parse_qs, urlsplit


    def to_bytes(data):
        """If the input is unicode, return its UTF-8 encoded bytes; otherwise return it unchanged."""
        if isinstance(data, unicode):
            return data.encode('utf-8')
        else:
            return data

    def to_string(data):
        """Convert the input to a str object (bytes on Python 2)."""
        return to_bytes(data)

    def to_unicode(data):
        """Convert the input to unicode; it must be unicode or UTF-8 encoded bytes."""
        if isinstance(data, bytes):
            return data.decode('utf-8')
        else:
            return data

    def stringify(input):
        # Recursively encode every unicode string inside dicts/lists to UTF-8 bytes.
        if isinstance(input, dict):
            return dict([(stringify(key), stringify(value)) for key,value in input.iteritems()])
        elif isinstance(input, list):
            return [stringify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input

    # Python 3 style aliases so the rest of the SDK can use one set of names.
    builtin_str = str
    bytes = str
    str = unicode


elif is_py3:
    from urllib.parse import quote as urlquote, unquote as urlunquote
    from urllib.parse import urlparse, parse_qs, urlsplit

    def to_bytes(data):
        """If the input is str (unicode), return its UTF-8 encoded bytes; otherwise return it unchanged."""
        if isinstance(data, str):
            return data.encode(encoding='utf-8')
        else:
            return data

    def to_string(data):
        """If the input is bytes, assume UTF-8 and return the decoded str."""
        if isinstance(data, bytes):
            return data.decode('utf-8')
        else:
            return data

    def to_unicode(data):
        """Convert the input to unicode; it must be unicode or UTF-8 encoded bytes."""
        return to_string(data)

    def stringify(input):
        # Strings are already unicode on Python 3; nothing to convert.
        return input

    builtin_str = str
    bytes = bytes
    str = str

+ 188
- 0
osssdk/crc64_combine.py View File

@@ -0,0 +1,188 @@
import sys

# -----------------------------------------------------------------------------
# Some code below reference to crcmod which base on python2 version
# Replace some functions to compat python3+ version
#
is_py3 = (sys.version_info[0] == 3)
if is_py3:
    # Provide Python 2 style aliases so the crcmod-derived code below runs unchanged.
    xrange = range
    long = int
    sys.maxint = sys.maxsize


# -----------------------------------------------------------------------------
# Export mkCombineFun to user to support crc64 combine feature.
#
# Example:
#
# import crcmod
#
# _POLY = 0x142F0E1EBA9EA3693
# _XOROUT = 0XFFFFFFFFFFFFFFFF
#
# string_a = '12345'
# string_b = '67890'
#
# combine_fun = mkCombineFun(_POLY, 0, True, _XOROUT)
#
# crc64_a = crcmod.Crc(_POLY, initCrc=0, xorOut=_XOROUT)
# crc64_a.update(string_a)
#
# crc64_b = crcmod.Crc(_POLY, initCrc=0, xorOut=_XOROUT)
# crc64_b.update(string_b)
#
# combine_fun(crc64_a.crcValue, crc64_b.crcValue, len(string_b))
#

def mkCombineFun(poly, initCrc=~long(0), rev=True, xorOut=0):
    """Build a CRC-combine function for the CRC defined by the parameters.

    The returned callable ``combine_fun(crc1, crc2, len2)`` computes the CRC
    of the concatenation of two byte streams from their individual CRCs and
    the length of the second stream. Only 64-bit polynomials are supported.
    """
    # mask = (1L<<n) - 1

    (sizeBits, initCrc, xorOut) = _verifyParams(poly, initCrc, xorOut)

    mask = (long(1) << sizeBits) - 1
    if rev:
        poly = _bitrev(long(poly) & mask, sizeBits)
    else:
        poly = long(poly) & mask

    if sizeBits == 64:
        fun = _combine64
    else:
        # Was `raise NotImplemented`, which is a TypeError on Python 3
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError('only 64-bit CRC combine is supported')

    def combine_fun(crc1, crc2, len2):
        return fun(poly, initCrc ^ xorOut, rev, xorOut, crc1, crc2, len2)

    return combine_fun


# -----------------------------------------------------------------------------
# The below code implemented crc64 combine logic, the algorithm reference to aliyun-oss-ruby-sdk
# See more details please visist:
# - https://github.com/aliyun/aliyun-oss-ruby-sdk/tree/master/ext/crcx

# Dimension of the GF(2) operator matrices (64-bit CRC register).
GF2_DIM = 64


def gf2_matrix_square(square, mat):
    """In place: square[n] = mat · mat[n] over GF(2), i.e. square = mat squared."""
    for n in xrange(GF2_DIM):
        square[n] = gf2_matrix_times(mat, mat[n])


def gf2_matrix_times(mat, vec):
    """Multiply matrix *mat* by vector *vec* over GF(2).

    Each set bit i of *vec* selects row mat[i]; the product is the XOR of
    the selected rows.
    """
    acc = 0
    bit_index = 0
    remaining = vec

    while remaining:
        if remaining & 1:
            acc ^= mat[bit_index]
        remaining >>= 1
        bit_index += 1

    return acc


def _combine64(poly, initCrc, rev, xorOut, crc1, crc2, len2):
    """Combine two 64-bit CRCs: given crc1=CRC(A), crc2=CRC(B) and
    len2=len(B), return CRC(A+B).

    Port of zlib's crc32_combine generalised to 64 bits (algorithm borrowed
    from aliyun-oss-ruby-sdk): applies the "append len2 zero bytes" operator
    to crc1 by repeated matrix squaring, then XORs in crc2.
    """
    if len2 == 0:
        return crc1

    even = [0] * GF2_DIM  # operator matrices for even powers of two zero bits
    odd = [0] * GF2_DIM   # operator matrices for odd powers of two zero bits

    # Undo the pre/post conditioning so we operate on the raw register value.
    crc1 ^= initCrc ^ xorOut

    if (rev):
        # put operator for one zero bit in odd
        odd[0] = poly  # CRC-64 polynomial
        row = 1
        for n in xrange(1, GF2_DIM):
            odd[n] = row
            row <<= 1
    else:
        row = 2
        for n in xrange(0, GF2_DIM - 1):
            odd[n] = row
            row <<= 1
        odd[GF2_DIM - 1] = poly

    # put operator for two zero bits in even
    gf2_matrix_square(even, odd)

    # put operator for four zero bits in odd
    gf2_matrix_square(odd, even)

    # Apply the zero-bit operator once per set bit of len2, squaring the
    # operator between bits and ping-ponging between even/odd.
    while True:
        gf2_matrix_square(even, odd)
        if len2 & long(1):
            crc1 = gf2_matrix_times(even, crc1)
        len2 >>= 1
        if len2 == 0:
            break

        gf2_matrix_square(odd, even)
        if len2 & long(1):
            crc1 = gf2_matrix_times(odd, crc1)
        len2 >>= 1

        if len2 == 0:
            break

    # Fold in the second CRC to finish the combination.
    crc1 ^= crc2

    return crc1


# -----------------------------------------------------------------------------
# The below code copy from crcmod, see more detail please visist:
# https://bitbucket.org/cmcqueen1975/crcmod/src/8fb658289c35eff1d37cc47799569f90c5b39e1e/python2/crcmod/crcmod.py?at=default&fileviewer=file-view-default

# -----------------------------------------------------------------------------
# Check the polynomial to make sure that it is acceptable and return the number
# of bits in the CRC.

def _verifyPoly(poly):
    """Return the CRC width in bits implied by *poly*, or raise ValueError."""
    poly = long(poly)  # common representation for all operations
    for width in (8, 16, 24, 32, 64):
        low = long(1) << width
        if low <= poly < low * 2:
            return width
    raise ValueError('The degree of the polynomial must be 8, 16, 24, 32 or 64')


# -----------------------------------------------------------------------------
# Bit reverse the input value.

def _bitrev(x, n):
    """Reverse the low *n* bits of *x*."""
    x = long(x)
    y = long(0)
    for i in xrange(n):
        y = (y << 1) | (x & long(1))
        x = x >> 1
    # Downcast to a plain int when the value fits (a Python 2 concern; on
    # Python 3 int/long are the same type).
    if ((long(1) << n) - 1) <= sys.maxint:
        return int(y)
    return y


# -----------------------------------------------------------------------------
# The following function validates the parameters of the CRC, namely,
# poly, and initial/final XOR values.
# It returns the size of the CRC (in bits), and "sanitized" initial/final XOR values.

def _verifyParams(poly, initCrc, xorOut):
    """Validate the CRC parameters.

    Returns (sizeBits, sanitized initCrc, sanitized xorOut) where the XOR
    values are masked to the CRC width as unsigned values.
    """
    sizeBits = _verifyPoly(poly)

    mask = (long(1) << sizeBits) - 1

    # Adjust the initial CRC to the correct data type (unsigned value).
    initCrc = long(initCrc) & mask
    if mask <= sys.maxint:
        initCrc = int(initCrc)

    # Similar for XOR-out value.
    xorOut = long(xorOut) & mask
    if mask <= sys.maxint:
        xorOut = int(xorOut)

    return (sizeBits, initCrc, xorOut)

+ 158
- 0
osssdk/credentials.py View File

@@ -0,0 +1,158 @@
# -*- coding: utf-8 -*-
import os
import time
from traceback import format_exc

import requests
import json
import threading

from loguru import logger

from .exceptions import ClientError
from .utils import to_unixtime
from .compat import to_unicode


class Credentials(object):
    """Plain container for an access key pair plus an optional STS security token."""

    __slots__ = ("access_key_id", 'access_key_secret', 'security_token')

    def __init__(self, access_key_id="", access_key_secret="", security_token=""):
        self.access_key_id = access_key_id
        self.access_key_secret = access_key_secret
        self.security_token = security_token

    def get_access_key_id(self):
        """Return the access key id."""
        return self.access_key_id

    def get_access_key_secret(self):
        """Return the access key secret."""
        return self.access_key_secret

    def get_security_token(self):
        """Return the STS security token ('' when STS is not in use)."""
        return self.security_token


# Default lifetime assumed for an ECS STS session token, and the fraction of
# that lifetime after which the token counts as "soon to expire".
DEFAULT_ECS_SESSION_TOKEN_DURATION_SECONDS = 3600 * 6
DEFAULT_ECS_SESSION_EXPIRED_FACTOR = 0.85


class EcsRamRoleCredential(Credentials):
    """Credentials obtained from the ECS RAM-role metadata service, with expiry tracking."""

    def __init__(self,
                 access_key_id,
                 access_key_secret,
                 security_token,
                 expiration,
                 duration,
                 expired_factor=None):
        # expiration: absolute unix timestamp at which the token expires.
        # duration: total token lifetime in seconds.
        self.access_key_id = access_key_id
        self.access_key_secret = access_key_secret
        self.security_token = security_token
        self.expiration = expiration
        self.duration = duration
        self.expired_factor = expired_factor or DEFAULT_ECS_SESSION_EXPIRED_FACTOR

    def get_access_key_id(self):
        return self.access_key_id

    def get_access_key_secret(self):
        return self.access_key_secret

    def get_security_token(self):
        return self.security_token

    def will_soon_expire(self):
        """True once less than (1 - expired_factor) of the lifetime remains."""
        now = int(time.time())
        return self.duration * (1.0 - self.expired_factor) > self.expiration - now


class CredentialsProvider(object):
    """Base interface for credentials providers."""

    def get_credentials(self):
        """Subclasses override this; the base implementation yields no credentials."""
        return None


class StaticCredentialsProvider(CredentialsProvider):
    """Provider that always hands back one fixed set of credentials."""

    # NOTE: a bare string here declares a single slot named "credentials".
    __slots__ = "credentials"

    def __init__(self, access_key_id="", access_key_secret="", security_token=""):
        self.credentials = Credentials(access_key_id, access_key_secret, security_token)

    def get_credentials(self):
        return self.credentials


class EcsRamRoleCredentialsProvider(CredentialsProvider):
    """Provider that lazily fetches, and refreshes near expiry, credentials
    from the ECS RAM-role metadata service."""

    def __init__(self, auth_host, max_retries=3, timeout=10):
        self.fetcher = EcsRamRoleCredentialsFetcher(auth_host)
        self.max_retries = max_retries
        self.timeout = timeout
        self.credentials = None
        self.__lock = threading.Lock()

    def get_credentials(self):
        """Return cached credentials, refreshing them when absent or about to expire."""
        # Double-checked locking: only one thread performs the refresh.
        if self.credentials is None or self.credentials.will_soon_expire():
            with self.__lock:
                if self.credentials is None or self.credentials.will_soon_expire():
                    try:
                        self.credentials = self.fetcher.fetch(self.max_retries, self.timeout)
                    except Exception:
                        logger.error("Exception: {}", format_exc())
                        # Keep serving the soon-to-expire token if one exists;
                        # only fail hard when there is nothing to return.
                        if self.credentials is None:
                            raise

        return self.credentials


class EcsRamRoleCredentialsFetcher(object):
    """Fetches STS credentials from the ECS instance metadata endpoint."""

    def __init__(self, auth_host):
        self.auth_host = auth_host

    def fetch(self, retry_times=3, timeout=10):
        """GET the metadata URL, retrying up to *retry_times*.

        Returns an EcsRamRoleCredential; raises ClientError when the service
        keeps failing or reports a non-Success code.
        """
        for i in range(0, retry_times):
            try:
                response = requests.get(self.auth_host, timeout=timeout)
                if response.status_code != 200:
                    raise ClientError(
                        "Failed to fetch credentials url, http code:{0}, msg:{1}".format(response.status_code,
                                                                                         response.text))
                dic = json.loads(to_unicode(response.content))
                code = dic.get('Code')
                access_key_id = dic.get('AccessKeyId')
                access_key_secret = dic.get('AccessKeySecret')
                security_token = dic.get('SecurityToken')
                expiration_date = dic.get('Expiration')
                last_updated_date = dic.get('LastUpdated')

                if code != "Success":
                    raise ClientError("Get credentials from ECS metadata service error, code: {0}".format(code))

                # Token lifetime = expiration - last update; fall back to the
                # default duration when LastUpdated is missing.
                expiration_stamp = to_unixtime(expiration_date, "%Y-%m-%dT%H:%M:%SZ")
                duration = DEFAULT_ECS_SESSION_TOKEN_DURATION_SECONDS
                if last_updated_date is not None:
                    last_updated_stamp = to_unixtime(last_updated_date, "%Y-%m-%dT%H:%M:%SZ")
                    duration = expiration_stamp - last_updated_stamp
                return EcsRamRoleCredential(access_key_id, access_key_secret, security_token, expiration_stamp,
                                            duration, DEFAULT_ECS_SESSION_EXPIRED_FACTOR)
            except Exception as e:
                # Only surface the failure after the last retry.
                if i == retry_times - 1:
                    logger.error("Exception: {}", format_exc())
                    raise ClientError("Failed to get credentials from ECS metadata service. {0}".format(e))


class EnvironmentVariableCredentialsProvider(CredentialsProvider):
    """Reads credentials from OSS_ACCESS_KEY_ID / OSS_ACCESS_KEY_SECRET /
    OSS_SESSION_TOKEN environment variables."""

    def __init__(self):
        # NOTE(review): these attributes are never read back;
        # get_credentials() re-reads the environment on every call.
        self.access_key_id = ""
        self.access_key_secret = ""
        self.security_token = ""

    def get_credentials(self):
        """Build Credentials from the environment; raises ClientError when a key is missing."""
        access_key_id = os.getenv('OSS_ACCESS_KEY_ID')
        access_key_secret = os.getenv('OSS_ACCESS_KEY_SECRET')
        security_token = os.getenv('OSS_SESSION_TOKEN')

        if not access_key_id:
            raise ClientError("Access key id should not be null or empty.")
        if not access_key_secret:
            raise ClientError("Secret access key should not be null or empty.")
        return Credentials(access_key_id, access_key_secret, security_token)

+ 402
- 0
osssdk/crypto.py View File

@@ -0,0 +1,402 @@
# -*- coding: utf-8 -*-

"""
oss2.encryption
~~~~~~~~~~~~~~

该模块包含了客户端加解密相关的函数和类。
"""
import abc
import json
import os
import copy
import logging
from functools import partial

import six
from Crypto.Cipher import PKCS1_OAEP, PKCS1_v1_5
from Crypto.PublicKey import RSA
from aliyunsdkcore import client
from aliyunsdkcore.acs_exception.exceptions import ServerException, ClientException
from aliyunsdkcore.http import format_type, method_type
from aliyunsdkkms.request.v20160120 import GenerateDataKeyRequest, DecryptRequest, EncryptRequest
from loguru import logger

from . import models
from . import headers
from . import utils
from .utils import b64decode_from_string, b64encode_as_string
from .compat import to_unicode
from .exceptions import ClientError, OpenApiFormatError, OpenApiServerError


class EncryptionMaterials(object):
    """Bundle of a material description plus either an RSA key pair or a KMS
    custom master key id (at most one of the two)."""

    def __init__(self, desc, key_pair=None, custom_master_key_id=None, passphrase=None):
        self.desc = {}
        if desc:
            if not isinstance(desc, dict):
                raise ClientError('Invalid type, the type of mat_desc must be dict!')
            self.desc = desc
        if key_pair and custom_master_key_id:
            raise ClientError('Both key_pair and custom_master_key_id are not none')

        if key_pair and not isinstance(key_pair, dict):
            raise ClientError('Invalid type, the type of key_pair must be dict!')

        self.key_pair = key_pair
        self.custom_master_key_id = custom_master_key_id
        self.passphrase = passphrase

    def add_description(self, key, value):
        """Add one entry to the material description."""
        self.desc[key] = value

    def add_descriptions(self, descriptions):
        """Merge every entry of *descriptions* into the material description."""
        self.desc.update(descriptions)


@six.add_metaclass(abc.ABCMeta)
class BaseCryptoProvider(object):
    """Abstract base for crypto providers: wraps a content cipher and the
    key-wrapping mechanism used to protect its key and IV."""

    def __init__(self, cipher, mat_desc=None):
        if not cipher:
            raise ClientError('Please initialize the value of cipher!')
        self.cipher = cipher
        self.cek_alg = None
        self.wrap_alg = None
        self.mat_desc = None
        self.encryption_materials_dict = {}
        if mat_desc:
            if isinstance(mat_desc, dict):
                self.mat_desc = mat_desc
            else:
                raise ClientError('Invalid type, the type of mat_desc must be dict!')

    @abc.abstractmethod
    def get_key(self):
        # Produce a fresh content-encryption key.
        pass

    def get_iv(self):
        return self.cipher.get_iv()

    @staticmethod
    def make_encrypt_adapter(stream, cipher):
        # Wrap a readable stream so reads pass through cipher.encrypt.
        return utils.make_cipher_adapter(stream, partial(cipher.encrypt))

    @staticmethod
    def make_decrypt_adapter(stream, cipher, discard=0):
        # Wrap a readable stream so reads pass through cipher.decrypt,
        # discarding the first *discard* plaintext bytes (range alignment).
        return utils.make_cipher_adapter(stream, partial(cipher.decrypt), discard)

    @abc.abstractmethod
    def decrypt_encrypted_key(self, encrypted_key):
        pass

    @abc.abstractmethod
    def decrypt_encrypted_iv(self, encrypted_iv):
        pass

    @abc.abstractmethod
    def reset_encryption_materials(self, encryption_materials):
        pass

    def adjust_range(self, start, end):
        # Delegate range alignment to the cipher.
        return self.cipher.adjust_range(start, end)

    @abc.abstractmethod
    def create_content_material(self):
        pass

    def add_encryption_materials(self, encryption_materials):
        """Register materials keyed by their (frozen) description."""
        if encryption_materials.desc:
            key = frozenset(encryption_materials.desc.items())
            self.encryption_materials_dict[key] = encryption_materials

    def get_encryption_materials(self, desc):
        """Look up previously registered materials by description; None when absent."""
        if desc:
            key = frozenset(desc.items())
            if key in self.encryption_materials_dict.keys():
                return self.encryption_materials_dict[key]


# Directory under the user's home where generated RSA key pairs are cached.
_LOCAL_RSA_TMP_DIR = '.oss-local-rsa'


@six.add_metaclass(abc.ABCMeta)
class LocalRsaProvider(BaseCryptoProvider):
    """Protects the data key with a locally stored RSA key pair (OAEP padding).

    :param str dir: directory holding the local RSA public/private keys
    :param str key: filename prefix of the local RSA keys
    :param str passphrase: passphrase of the local RSA keys
    :param class cipher: content cipher, AES256-CTR by default; a custom
        symmetric cipher may be supplied if it follows the AESCipher contract
    """

    DEFAULT_PUB_KEY_SUFFIX = '.public_key.pem'
    DEFAULT_PRIV_KEY_SUFFIX = '.private_key.pem'

    def __init__(self, dir=None, key='', passphrase=None, cipher=utils.AESCTRCipher(),
                 pub_key_suffix=DEFAULT_PUB_KEY_SUFFIX, private_key_suffix=DEFAULT_PRIV_KEY_SUFFIX):

        super(LocalRsaProvider, self).__init__(cipher=cipher)

        self.wrap_alg = headers.RSA_NONE_OAEPWithSHA1AndMGF1Padding
        keys_dir = dir or os.path.join(os.path.expanduser('~'), _LOCAL_RSA_TMP_DIR)

        priv_key_path = os.path.join(keys_dir, key + private_key_suffix)
        pub_key_path = os.path.join(keys_dir, key + pub_key_suffix)
        try:
            if os.path.exists(priv_key_path) and os.path.exists(pub_key_path):
                # Reuse the cached key pair from disk.
                with open(priv_key_path, 'rb') as f:
                    self.__decrypt_obj = PKCS1_OAEP.new(RSA.importKey(f.read(), passphrase=passphrase))

                with open(pub_key_path, 'rb') as f:
                    self.__encrypt_obj = PKCS1_OAEP.new(RSA.importKey(f.read(), passphrase=passphrase))

            else:
                # First run: generate a 2048-bit key pair and cache it on disk.
                logger.warning('The file path of private key or public key is not exist, will generate key pair')
                private_key = RSA.generate(2048)
                public_key = private_key.publickey()

                self.__encrypt_obj = PKCS1_OAEP.new(public_key)
                self.__decrypt_obj = PKCS1_OAEP.new(private_key)

                utils.makedir_p(keys_dir)
                with open(priv_key_path, 'wb') as f:
                    f.write(private_key.exportKey(passphrase=passphrase))

                with open(pub_key_path, 'wb') as f:
                    f.write(public_key.exportKey(passphrase=passphrase))
        except (ValueError, TypeError, IndexError) as e:
            raise ClientError(str(e))

    def get_key(self):
        """Generate a fresh symmetric content-encryption key."""
        return self.cipher.get_key()

    def decrypt_encrypted_key(self, encrypted_key):
        try:
            return self.__decrypt_data(encrypted_key)
        except (TypeError, ValueError) as e:
            raise ClientError(str(e))

    def decrypt_encrypted_iv(self, encrypted_iv):
        try:
            return self.__decrypt_data(encrypted_iv)
        except (TypeError, ValueError) as e:
            raise ClientError(str(e))

    def reset_encryption_materials(self, encryption_materials):
        raise ClientError("do not support reset_encryption_materials!")

    def create_content_material(self):
        """Create per-object crypto material: fresh key/IV, RSA-wrapped copies, initialized cipher."""
        plain_key = self.get_key()
        encrypted_key = self.__encrypt_data(plain_key)
        plain_iv = self.get_iv()
        encrypted_iv = self.__encrypt_data(plain_iv)
        cipher = copy.copy(self.cipher)
        wrap_alg = self.wrap_alg
        mat_desc = self.mat_desc

        cipher.initialize(plain_key, plain_iv)

        content_crypto_material = models.ContentCryptoMaterial(cipher, wrap_alg, encrypted_key, encrypted_iv,
                                                               mat_desc)
        return content_crypto_material

    def __encrypt_data(self, data):
        return self.__encrypt_obj.encrypt(data)

    def __decrypt_data(self, data):
        return self.__decrypt_obj.decrypt(data)


@six.add_metaclass(abc.ABCMeta)
class RsaProvider(BaseCryptoProvider):
    """Protects the data key with a caller-supplied RSA key pair (PKCS#1 v1.5 padding).

    :param dict key_pair: dict holding 'public_key' and/or 'private_key' PEM text
    :param str passphrase: passphrase of the RSA keys
    :param class cipher: content cipher, AES256-CTR by default; a custom
        symmetric cipher may be supplied if it follows the AESCipher contract
    :param dict mat_desc: material description attached to encrypted objects
    """

    def __init__(self, key_pair, passphrase=None, cipher=utils.AESCTRCipher(), mat_desc=None):

        super(RsaProvider, self).__init__(cipher=cipher, mat_desc=mat_desc)
        self.wrap_alg = headers.RSA_NONE_PKCS1Padding_WRAP_ALGORITHM

        if key_pair and not isinstance(key_pair, dict):
            raise ClientError('Invalid type, the type of key_pair must be dict!')

        try:
            # Only the halves that are present become usable; encrypting
            # without a public key (or decrypting without a private key)
            # fails later with an AttributeError.
            if 'public_key' in key_pair:
                self.__encrypt_obj = PKCS1_v1_5.new(RSA.importKey(key_pair['public_key'], passphrase=passphrase))

            if 'private_key' in key_pair:
                self.__decrypt_obj = PKCS1_v1_5.new(RSA.importKey(key_pair['private_key'], passphrase=passphrase))
        except (ValueError, TypeError) as e:
            raise ClientError(str(e))

    def get_key(self):
        """Generate a fresh symmetric content-encryption key."""
        return self.cipher.get_key()

    def decrypt_encrypted_key(self, encrypted_key):
        try:
            return self.__decrypt_data(encrypted_key)
        except (TypeError, ValueError) as e:
            raise ClientError(str(e))

    def decrypt_encrypted_iv(self, encrypted_iv):
        try:
            return self.__decrypt_data(encrypted_iv)
        except (TypeError, ValueError) as e:
            raise ClientError(str(e))

    def reset_encryption_materials(self, encryption_materials):
        """Build a new provider from other encryption materials."""
        return RsaProvider(encryption_materials.key_pair, encryption_materials.passphrase, self.cipher,
                           encryption_materials.desc)

    def create_content_material(self):
        """Create per-object crypto material: fresh key/IV, RSA-wrapped copies, initialized cipher."""
        plain_key = self.get_key()
        encrypted_key = self.__encrypt_data(plain_key)
        plain_iv = self.get_iv()
        encrypted_iv = self.__encrypt_data(plain_iv)
        cipher = copy.copy(self.cipher)
        wrap_alg = self.wrap_alg
        mat_desc = self.mat_desc

        cipher.initialize(plain_key, plain_iv)

        content_crypto_material = models.ContentCryptoMaterial(cipher, wrap_alg, encrypted_key, encrypted_iv,
                                                               mat_desc)
        return content_crypto_material

    def __encrypt_data(self, data):
        return self.__encrypt_obj.encrypt(data)

    def __decrypt_data(self, data):
        # PKCS1_v1_5.decrypt returns the given sentinel (here the `object`
        # builtin) instead of raising when unpadding fails.
        decrypted_data = self.__decrypt_obj.decrypt(data, object)
        if decrypted_data == object:
            raise ClientError('Decrypted data error, please check you key pair!')
        return decrypted_data


class AliKMSProvider(BaseCryptoProvider):
    """Protects the data key with the Aliyun KMS service. Details:
    https://help.aliyun.com/product/28933.html?spm=a2c4g.11186623.3.1.jlYT4v
    Temporarily unusable on Python 3.3, see
    https://github.com/aliyun/aliyun-openapi-python-sdk/issues/61

    :param str access_key_id: access key id with permission to call KMS
    :param str access_key_secret: matching access key secret
    :param str region: region of the KMS service
    :param str cmk_id: customer master key id
    :param str sts_token: security token, required when using temporary credentials
    :param str passphrase: KMS encryption-context passphrase
    :param class cipher: content cipher; only the default AES256-CTR is supported
    """

    def __init__(self, access_key_id, access_key_secret, region, cmk_id, sts_token=None, passphrase=None,
                 cipher=utils.AESCTRCipher(), mat_desc=None):

        super(AliKMSProvider, self).__init__(cipher=cipher, mat_desc=mat_desc)
        if not isinstance(cipher, utils.AESCTRCipher):
            raise ClientError('AliKMSProvider only support AES256 cipher now')
        self.wrap_alg = headers.KMS_ALI_WRAP_ALGORITHM
        self.custom_master_key_id = cmk_id
        self.sts_token = sts_token
        # Optional KMS EncryptionContext carrying the passphrase.
        self.context = '{"x-passphrase":"' + passphrase + '"}' if passphrase else ''
        self.kms_client = client.AcsClient(access_key_id, access_key_secret, region)

    def get_key(self):
        """Return (plaintext data key, KMS-encrypted data key)."""
        plain_key, encrypted_key = self.__generate_data_key()
        return plain_key, encrypted_key

    def decrypt_encrypted_key(self, encrypted_key):
        return b64decode_from_string(self.__decrypt_data(encrypted_key))

    def decrypt_encrypted_iv(self, encrypted_iv, deprecated=False):
        # Old-format objects stored the IV without the base64 wrapping.
        if deprecated:
            return self.__decrypt_data(encrypted_iv)
        return b64decode_from_string(self.__decrypt_data(encrypted_iv))

    def reset_encryption_materials(self, encryption_materials):
        """Return a copy of this provider re-targeted at other materials."""
        provider = copy.copy(self)
        provider.custom_master_key_id = encryption_materials.custom_master_key_id
        provider.context = '{"x-passphrase":"' + encryption_materials.passphrase + '"}' if encryption_materials.passphrase else ''
        provider.mat_desc = encryption_materials.desc
        return provider

    def create_content_material(self):
        """Create per-object crypto material: KMS data key plus KMS-encrypted IV."""
        plain_key, encrypted_key = self.get_key()
        plain_iv = self.get_iv()
        encrypted_iv = self.__encrypt_data(b64encode_as_string(plain_iv))
        cipher = copy.copy(self.cipher)
        wrap_alg = self.wrap_alg
        mat_desc = self.mat_desc

        cipher.initialize(plain_key, plain_iv)

        content_crypto_material = models.ContentCryptoMaterial(cipher, wrap_alg, encrypted_key, encrypted_iv,
                                                               mat_desc)
        return content_crypto_material

    def __generate_data_key(self):
        # Ask KMS for a fresh 256-bit data key under the master key.
        req = GenerateDataKeyRequest.GenerateDataKeyRequest()

        req.set_accept_format(format_type.JSON)
        req.set_method(method_type.POST)

        req.set_KeyId(self.custom_master_key_id)
        req.set_KeySpec('AES_256')
        req.set_NumberOfBytes(32)
        req.set_EncryptionContext(self.context)
        if self.sts_token:
            req.set_STSToken(self.sts_token)

        resp = self.__do(req)

        return b64decode_from_string(resp['Plaintext']), resp['CiphertextBlob']

    def __encrypt_data(self, data):
        req = EncryptRequest.EncryptRequest()

        req.set_accept_format(format_type.JSON)
        req.set_method(method_type.POST)
        req.set_KeyId(self.custom_master_key_id)
        req.set_Plaintext(data)
        req.set_EncryptionContext(self.context)
        if self.sts_token:
            req.set_STSToken(self.sts_token)

        resp = self.__do(req)

        return resp['CiphertextBlob']

    def __decrypt_data(self, data):
        req = DecryptRequest.DecryptRequest()

        req.set_accept_format(format_type.JSON)
        req.set_method(method_type.POST)
        req.set_CiphertextBlob(data)
        req.set_EncryptionContext(self.context)
        if self.sts_token:
            req.set_STSToken(self.sts_token)

        resp = self.__do(req)
        return resp['Plaintext']

    def __do(self, req):
        # Execute the KMS request, mapping SDK exceptions onto oss exceptions.
        try:
            body = self.kms_client.do_action_with_exception(req)
            return json.loads(to_unicode(body))
        except ServerException as e:
            raise OpenApiServerError(e.http_status, e.request_id, e.message, e.error_code)
        except ClientException as e:
            raise ClientError(e.message)
        except (KeyError, ValueError, TypeError) as e:
            raise OpenApiFormatError('Json Error: ' + str(e))

+ 402
- 0
osssdk/crypto_bucket.py View File

@@ -0,0 +1,402 @@
# -*- coding: utf-8 -*-
from loguru import logger

from . import http
from . import exceptions
from . import Bucket

from .api import _make_range_string
from .models import *
from .compat import to_string, urlsplit, parse_qs
from .crypto import BaseCryptoProvider
from .exceptions import ClientError
import copy
import threading


class CryptoBucket(Bucket):
"""用于加密Bucket和Object操作的类,诸如上传、下载Object等。创建、删除bucket的操作需使用Bucket类接口。

用法(假设Bucket属于杭州区域) ::

>>> import oss2
>>> auth = oss2.Auth('your-access-key-id', 'your-access-key-secret')
>>> bucket = oss2.CryptoBucket(auth, 'http://oss-cn-hangzhou.aliyuncs.com', 'your-bucket', oss2.LocalRsaProvider())
>>> bucket.put_object('readme.txt', 'content of the object')
<oss2.models.PutObjectResult object at 0x029B9930>

:param auth: 包含了用户认证信息的Auth对象
:type auth: oss2.Auth

:param str endpoint: 访问域名或者CNAME
:param str bucket_name: Bucket名
:param crypto_provider: 客户端加密类。该参数默认为空
:type crypto_provider: oss2.crypto.BaseCryptoProvider
:param bool is_cname: 如果endpoint是CNAME则设为True;反之,则为False。

:param session: 会话。如果是None表示新开会话,非None则复用传入的会话
:type session: oss2.Session

:param float connect_timeout: 连接超时时间,以秒为单位。

:param str app_name: 应用名。该参数不为空,则在User Agent中加入其值。
注意到,最终这个字符串是要作为HTTP Header的值传输的,所以必须要遵循HTTP标准。

:param bool enable_crc: 如果开启crc校验则设为True;反之,则为False

"""

    def __init__(self, auth, endpoint, bucket_name, crypto_provider,
                 is_cname=False,
                 session=None,
                 connect_timeout=None,
                 app_name='',
                 enable_crc=True,
                 ):
        # crypto_provider performs the client-side envelope encryption.
        if not isinstance(crypto_provider, BaseCryptoProvider):
            raise ClientError('crypto_provider must be an instance of BaseCryptoProvider')

        logger.debug("Init CryptoBucket: {0}".format(bucket_name))
        super(CryptoBucket, self).__init__(auth, endpoint, bucket_name, is_cname, session, connect_timeout, app_name,
                                           enable_crc)

        self.crypto_provider = crypto_provider
        # Per-upload encryption contexts for multipart uploads, guarded by a lock.
        self.upload_contexts = {}
        self.upload_contexts_lock = threading.Lock()

        # Tag the User-Agent so encryption-client traffic is identifiable.
        if self.app_name:
            self.user_agent = http.USER_AGENT + '/' + self.app_name + '/' + OSS_ENCRYPTION_CLIENT
        else:
            self.user_agent = http.USER_AGENT + '/' + OSS_ENCRYPTION_CLIENT

def _init_user_agent(self, headers):
if 'User-Agent' not in headers:
headers['User-Agent'] = self.user_agent
else:
headers['User-Agent'] += '/' + OSS_ENCRYPTION_CLIENT

    def put_object(self, key, data,
                   headers=None,
                   progress_callback=None):
        """Upload a regular object, transparently encrypting its content.

        Usage ::
            >>> bucket.put_object('readme.txt', 'content of readme.txt')
            >>> with open(u'local_file.txt', 'rb') as f:
            >>>     bucket.put_object('remote_file.txt', f)

        :param key: object name on OSS

        :param data: content to upload
        :type data: bytes, str or file-like object

        :param headers: user-specified HTTP headers, e.g. Content-Type,
            Content-MD5 or x-oss-meta-* entries
        :type headers: dict, preferably oss2.CaseInsensitiveDict

        :param progress_callback: progress callback, see :ref:`progress_callback`

        :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
        """
        logger.debug("Start to put object to CryptoBucket")

        headers = http.CaseInsensitiveDict(headers)
        self._init_user_agent(headers)
        # Generate per-object key/IV, wrap the data stream with the cipher
        # and record the crypto metadata in the object headers.
        content_crypto_material = self.crypto_provider.create_content_material()
        data = self.crypto_provider.make_encrypt_adapter(data, content_crypto_material.cipher)
        headers = content_crypto_material.to_object_meta(headers)

        return super(CryptoBucket, self).put_object(key, data, headers, progress_callback)

    def put_object_with_url(self, sign_url, data, headers=None, progress_callback=None):

        """Upload an object via a pre-signed URL.

        Not implemented for CryptoBucket: always raises ClientError.

        :param sign_url: the pre-signed url
        :param data: content to upload
        :param headers: user-specified HTTP headers; must match the headers used when signing
        :param progress_callback: progress callback, see :ref:`progress_callback`
        :return:
        """
        raise ClientError("The operation is not support for CryptoBucket now")

    def append_object(self, key, position, data,
                      headers=None,
                      progress_callback=None,
                      init_crc=None):
        # Appendable objects cannot be client-side encrypted; always raises.
        raise ClientError("The operation is not support for CryptoBucket")

    def get_object(self, key,
                   byte_range=None,
                   headers=None,
                   progress_callback=None,
                   process=None,
                   params=None):
        """Download an object, transparently decrypting its content.

        Usage ::

            >>> result = bucket.get_object('readme.txt')
            >>> print(result.read())
            'hello world'

        :param key: object name
        :param byte_range: download range, see :ref:`byte_range`

        :param headers: HTTP headers
        :type headers: dict, preferably oss2.CaseInsensitiveDict

        :param progress_callback: progress callback, see :ref:`progress_callback`
        :param process: server-side object processing (e.g. image service);
            not supported on CryptoBucket

        :param params: HTTP query-string parameters
        :type params: dict

        :return: file-like object

        :raises: :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` when the
            object does not exist; other exceptions may also be raised
        """
        if process:
            raise ClientError("Process object operation is not support for Crypto Bucket")

        headers = http.CaseInsensitiveDict(headers)
        self._init_user_agent(headers)

        discard = 0
        range_string = ''

        if byte_range:
            if byte_range[0] is None and byte_range[1]:
                raise ClientError("Don't support range get while start is none and end is not")
            # Widen the requested range to cipher boundaries; the extra
            # leading bytes are decrypted and then discarded.
            start, end = self.crypto_provider.adjust_range(byte_range[0], byte_range[1])
            adjust_byte_range = (start, end)

            range_string = _make_range_string(adjust_byte_range)
            if range_string:
                headers['range'] = range_string

            if byte_range[0] and adjust_byte_range[0] < byte_range[0]:
                discard = byte_range[0] - adjust_byte_range[0]
            logger.debug("adjust range of get object, byte_range: {0}, adjust_byte_range: {1}, discard: {2}".format(
                byte_range, adjust_byte_range, discard))

        logger.debug(
            "Start to get object from CryptoBucket: {0}, key: {1}, range: {2}, headers: {3}, params: {4}".format(
                self.bucket_name, to_string(key), range_string, headers, params))
        resp = self._do('GET', self.bucket_name, key, headers=headers, params=params)
        logger.debug("Get object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))

        return GetObjectResult(resp, progress_callback, self.enable_crc, crypto_provider=self.crypto_provider,
                               discard=discard)

    def get_object_with_url(self, sign_url,
                            byte_range=None,
                            headers=None,
                            progress_callback=None):
        """Download an object via a pre-signed URL, transparently decrypting it.

        :param sign_url: the pre-signed url
        :param byte_range: download range, see :ref:`byte_range`

        :param headers: HTTP headers; must match the headers used when signing
        :type headers: dict, preferably oss2.CaseInsensitiveDict

        :param progress_callback: progress callback, see :ref:`progress_callback`

        :return: file-like object

        :raises: :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` when the
            object does not exist; other exceptions may also be raised
        """
        # Pre-signed URLs requesting server-side processing cannot be decrypted.
        query = parse_qs(urlsplit(sign_url).query)
        if query and (Bucket.PROCESS in query):
            raise ClientError("Process object operation is not support for Crypto Bucket")

        headers = http.CaseInsensitiveDict(headers)
        self._init_user_agent(headers)

        discard = 0
        range_string = ''

        if byte_range:
            if not byte_range[0] and byte_range[1]:
                raise ClientError("Don't support range get while start is none and end is not")
            # Widen the requested range to cipher boundaries; the extra
            # leading bytes are decrypted and then discarded.
            start, end = self.crypto_provider.adjust_range(byte_range[0], byte_range[1])
            adjust_byte_range = (start, end)

            range_string = _make_range_string(adjust_byte_range)
            if range_string:
                headers['range'] = range_string

            if byte_range[0] and adjust_byte_range[0] < byte_range[0]:
                discard = byte_range[0] - adjust_byte_range[0]
            logger.debug("adjust range of get object, byte_range: {0}, adjust_byte_range: {1}, discard: {2}".format(
                byte_range, adjust_byte_range, discard))

        logger.debug(
            "Start to get object with url from CryptoBucket: {0}, sign_url: {1}, range: {2}, headers: {3}".format(
                self.bucket_name, sign_url, range_string, headers))
        resp = self._do_url('GET', sign_url, headers=headers)
        return GetObjectResult(resp, progress_callback, self.enable_crc,
                               crypto_provider=self.crypto_provider, discard=discard)

def create_select_object_meta(self, key, select_meta_params=None):
    """Not supported on a client-side encryption bucket; always raises ClientError."""
    raise ClientError("The operation is not support for Crypto Bucket")

def select_object(self, key, sql,
                  progress_callback=None,
                  select_params=None
                  ):
    """SelectObject is not supported on a client-side encryption bucket; always raises ClientError."""
    raise ClientError("The operation is not support for CryptoBucket")

def init_multipart_upload(self, key, headers=None, params=None, upload_context=None):
    """Initialize a multipart upload with client-side encryption.

    :param str key: object name
    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)
    :param params: request parameters, forwarded to the underlying request
    :param upload_context: multipart encryption context; required, and its
        ``data_size`` must be set. A given ``part_size`` is validated against
        the cipher; otherwise a valid part size is chosen automatically.

    :return: :class:`InitMultipartUploadResult <oss2.models.InitMultipartUploadResult>`
        The ``crypto_multipart_context`` in the result records the encryption
        meta info and must be passed along to every :func:`upload_part` call.
    """

    headers = http.CaseInsensitiveDict(headers)
    self._init_user_agent(headers)
    if not upload_context or not upload_context.data_size:
        raise ClientError("It is not support none upload_context and must specify data_size of upload_context ")

    logger.info("Start to init multipart upload by CryptoBucket, data_size: {0}, part_size: {1}".format(
        upload_context.data_size, upload_context.part_size))

    if upload_context.part_size:
        res = self.crypto_provider.cipher.is_valid_part_size(upload_context.part_size, upload_context.data_size)
        if not res:
            raise ClientError("part_size is invalid for multipart upload for CryptoBucket")
    else:
        # No explicit part size: let the cipher pick one that fits its block size.
        upload_context.part_size = self.crypto_provider.cipher.determine_part_size(upload_context.data_size)

    content_crypto_material = self.crypto_provider.create_content_material()

    upload_context.content_crypto_material = content_crypto_material

    # Record the encryption envelope in the object's metadata headers.
    headers = content_crypto_material.to_object_meta(headers, upload_context)

    # Fix: ``params`` was accepted but silently dropped; forward it to the base call.
    resp = super(CryptoBucket, self).init_multipart_upload(key, headers, params)

    return resp

def upload_part(self, key, upload_id, part_number, data, progress_callback=None, headers=None, upload_context=None):
    """Upload one part with client-side encryption.

    :param upload_context: context returned by :func:`init_multipart_upload`;
        carries the content crypto material and part size. Required.
    :param str key: object name; must match the one passed to :func:`init_multipart_upload`.
    :param str upload_id: multipart upload ID
    :param int part_number: part number, starting from 1.
    :param data: data to upload.
    :param progress_callback: user progress callback, e.g. for a progress bar;
        see :ref:`progress_callback`.

    :param headers: user-specified HTTP headers, e.g. Content-MD5
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)

    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    logger.info(
        "Start to upload multipart of CryptoBucket, upload_id = {0}, part_number = {1}".format(upload_id,
                                                                                               part_number))
    headers = http.CaseInsensitiveDict(headers)
    self._init_user_agent(headers)
    if upload_context:
        context = upload_context
    else:
        raise ClientError("Could not init upload context, upload contexts flag is False and upload context is none")

    content_crypto_material = context.content_crypto_material

    # The material recorded at init time must match this bucket's provider,
    # otherwise the parts would be encrypted inconsistently.
    if content_crypto_material.cek_alg != self.crypto_provider.cipher.alg or content_crypto_material.wrap_alg != \
            self.crypto_provider.wrap_alg:
        err_msg = 'Envelope or data encryption/decryption algorithm is inconsistent'
        raise InconsistentError(err_msg, self)

    headers = content_crypto_material.to_object_meta(headers, context)

    plain_key = self.crypto_provider.decrypt_encrypted_key(content_crypto_material.encrypted_key)
    plain_iv = self.crypto_provider.decrypt_encrypted_iv(content_crypto_material.encrypted_iv)

    # Seek the cipher counter to this part's absolute byte offset so that
    # each part can be encrypted independently of the others.
    offset = context.part_size * (part_number - 1)
    counter = self.crypto_provider.cipher.calc_offset(offset)

    # Copy the cipher so the shared material in the context stays untouched.
    cipher = copy.copy(content_crypto_material.cipher)
    cipher.initialize(plain_key, plain_iv, counter)
    data = self.crypto_provider.make_encrypt_adapter(data, cipher)
    resp = super(CryptoBucket, self).upload_part(key, upload_id, part_number, data, progress_callback, headers)

    return resp

def complete_multipart_upload(self, key, upload_id, parts, headers=None):
    """Complete a client-side-encrypted multipart upload and create the object.

    Call this only after every part has been uploaded successfully.

    :param str key: object name; must match the one passed to :func:`init_multipart_upload`.
    :param str upload_id: multipart upload ID

    :param parts: list of PartInfo; ``part_number`` and ``etag`` are required.
        The etag comes from the :func:`upload_part` result.
    :type parts: list of `PartInfo <oss2.models.PartInfo>`

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)

    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    logger.info("Start to complete multipart upload of CryptoBucket, upload_id = {0}".format(upload_id))

    headers = http.CaseInsensitiveDict(headers)
    self._init_user_agent(headers)
    # Fix: the former ``try/except exceptions as e: raise e`` attempted to
    # catch the ``exceptions`` module itself (a TypeError once anything
    # raised) and merely re-raised, so the base method is now called directly.
    return super(CryptoBucket, self).complete_multipart_upload(key, upload_id, parts, headers)

def abort_multipart_upload(self, key, upload_id, headers=None):
    """Abort a multipart upload.

    :param str key: object name; must match the one passed to :func:`init_multipart_upload`.
    :param str upload_id: multipart upload ID
    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)

    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    logger.info("Start to abort multipart upload of CryptoBucket, upload_id = {0}".format(upload_id))

    headers = http.CaseInsensitiveDict(headers)
    self._init_user_agent(headers)
    # Fix 1: headers (incl. the user agent) were built but never forwarded to
    # the base call; pass them through, consistent with complete_multipart_upload.
    # Fix 2: removed ``except exceptions as e: raise e``, which tried to catch
    # the ``exceptions`` module (a TypeError at exception time) and was a no-op.
    return super(CryptoBucket, self).abort_multipart_upload(key, upload_id, headers)

def upload_part_copy(self, source_bucket_name, source_key, byte_range, target_key, target_upload_id,
                     target_part_number, headers=None):
    """Part copy: copy part or all of an existing object as one part of a
    target object's multipart upload.

    Not supported on a client-side encryption bucket; always raises ClientError.

    :param target_part_number: part number in the target upload
    :param target_upload_id: multipart upload ID of the target object
    :param target_key: target object name
    :param source_key: source object name
    :param source_bucket_name: bucket containing the source object
    :param byte_range: range of the source object to copy; see :ref:`byte_range`

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)

    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    raise ClientError("The operation is not support for CryptoBucket now")

def process_object(self, key, process):
    """Not supported on a client-side encryption bucket; always raises ClientError."""
    raise ClientError("The operation is not support for CryptoBucket")

+ 51
- 0
osssdk/defaults.py View File

@@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-

"""
oss2.defaults
~~~~~~~~~~~~~

全局缺省变量。

"""


def get(value, default_value):
    """Return *value*, or *default_value* when *value* is None.

    Note that falsy-but-not-None values (0, '', False) are returned as-is.
    """
    return default_value if value is None else value


#: Connection timeout, in seconds.
connect_timeout = 60

#: Default number of retries per request.
request_retries = 3

#: For some APIs, multipart upload is used once the payload length is
#: greater than or equal to this threshold (bytes).
multipart_threshold = 10 * 1024 * 1024

#: Default number of threads for multipart upload.
multipart_num_threads = 1

#: Default part size, in bytes.
part_size = 10 * 1024 * 1024

#: Maximum number of parts.
max_part_count = 10000

#: Minimum part size, in bytes.
min_part_size = 100 * 1024

#: Connection pool size per Session.
connection_pool_size = 10


#: For resumable download, parallel download (multiget) is used when the
#: OSS object is larger than this threshold (bytes).
multiget_threshold = 100 * 1024 * 1024

#: Default number of threads for parallel download (multiget).
multiget_num_threads = 4

#: Default part size for parallel download (multiget), in bytes.
multiget_part_size = 10 * 1024 * 1024

+ 393
- 0
osssdk/exceptions.py View File

@@ -0,0 +1,393 @@
# -*- coding: utf-8 -*-

"""
oss2.exceptions
~~~~~~~~~~~~~~

异常类。
"""

import re
import base64
import xml.etree.ElementTree as ElementTree
from xml.parsers import expat


from .compat import to_string
from .headers import *

# Maps (HTTP status, OSS error code) -> exception class; populated at end of module.
_OSS_ERROR_TO_EXCEPTION = {}


# Pseudo status codes for errors raised locally, without a server response.
OSS_CLIENT_ERROR_STATUS = -1
OSS_REQUEST_ERROR_STATUS = -2
OSS_INCONSISTENT_ERROR_STATUS = -3
OSS_FORMAT_ERROR_STATUS = -4
OSS_SELECT_CLIENT_ERROR_STATUS = -5


class OssError(Exception):
    """Base class for all OSS errors, carrying the HTTP response context."""

    def __init__(self, status, headers, body, details):
        #: HTTP status code
        self.status = status

        #: Request ID used to trace an OSS request; provide it when filing tickets
        self.request_id = headers.get(OSS_REQUEST_ID, '')

        #: (partial) HTTP response body
        self.body = body

        #: detailed error info, a dict of string to string
        self.details = details

        #: OSS error code
        self.code = self.details.get('Code', '')

        #: OSS error message
        self.message = self.details.get('Message', '')

        #: new-style OSS error code
        self.ec = self.details.get('EC', '')

        #: response headers
        self.headers = headers

    def __str__(self):
        error = {'status': self.status,
                 OSS_REQUEST_ID: self.request_id,
                 'details': self.details}
        return str(error)

    def _str_with_body(self):
        # Variant used by subclasses whose useful payload is in ``body``.
        error = {'status': self.status,
                 OSS_REQUEST_ID: self.request_id,
                 'details': self.body}
        return str(error)


class ClientError(OssError):
    """Error raised on the client side, before/without any server response."""

    def __init__(self, message):
        OssError.__init__(self, OSS_CLIENT_ERROR_STATUS, {}, 'ClientError: ' + message, {})

    def __str__(self):
        return self._str_with_body()


class RequestError(OssError):
    """The HTTP request itself failed (network error, timeout, ...)."""

    def __init__(self, e):
        OssError.__init__(self, OSS_REQUEST_ERROR_STATUS, {}, 'RequestError: ' + str(e), {})
        #: the underlying exception from the HTTP library
        self.exception = e

    def __str__(self):
        return self._str_with_body()


class InconsistentError(OssError):
    """A client-side consistency check (e.g. CRC or crypto material) failed."""

    def __init__(self, message, request_id=''):
        OssError.__init__(self, OSS_INCONSISTENT_ERROR_STATUS, {OSS_REQUEST_ID: request_id}, 'InconsistentError: ' + message, {})

    def __str__(self):
        return self._str_with_body()


class OpenApiFormatError(OssError):
    """An OpenAPI response could not be parsed."""

    def __init__(self, message):
        OssError.__init__(self, OSS_FORMAT_ERROR_STATUS, {}, message, {})

    def __str__(self):
        return self._str_with_body()


class OpenApiServerError(OssError):
    """An OpenAPI endpoint returned an error."""

    def __init__(self, status, request_id, message, error_code):
        OssError.__init__(self, status, {OSS_REQUEST_ID: request_id}, '', {'Code': error_code, 'Message': message})


class ServerError(OssError):
    """Error reported by the OSS server.

    Subclasses declaring both ``status`` and ``code`` class attributes are
    auto-registered in ``_OSS_ERROR_TO_EXCEPTION`` at the end of this module,
    so :func:`make_exception` can map responses to the most specific class.
    """
    pass


class NotFound(ServerError):
    status = 404
    code = ''


class MalformedXml(ServerError):
    status = 400
    code = 'MalformedXML'


class InvalidRequest(ServerError):
    status = 400
    code = 'InvalidRequest'


class OperationNotSupported(ServerError):
    status = 400
    code = 'OperationNotSupported'


class RestoreAlreadyInProgress(ServerError):
    status = 409
    code = 'RestoreAlreadyInProgress'


class InvalidArgument(ServerError):
    status = 400
    code = 'InvalidArgument'

    def __init__(self, status, headers, body, details):
        super(InvalidArgument, self).__init__(status, headers, body, details)
        #: name of the offending argument
        self.name = details.get('ArgumentName')
        #: value of the offending argument
        self.value = details.get('ArgumentValue')


class InvalidDigest(ServerError):
    status = 400
    code = 'InvalidDigest'


class InvalidObjectName(ServerError):
    status = 400
    code = 'InvalidObjectName'


# NOTE: intentionally shadows the builtin name; it mirrors the OSS error code.
class NotImplemented(ServerError):
    status = 400
    code = 'NotImplemented'


class InvalidEncryptionRequest(ServerError):
    status = 400
    code = 'InvalidEncryptionRequest'


class BucketReplicationAlreadyExist(ServerError):
    status = 400
    code = 'BucketReplicationAlreadyExist'
# --- 404 family ------------------------------------------------------------

class NoSuchBucket(NotFound):
    status = 404
    code = 'NoSuchBucket'


class NoSuchKey(NotFound):
    status = 404
    code = 'NoSuchKey'


class NoSuchUpload(NotFound):
    status = 404
    code = 'NoSuchUpload'


class NoSuchWebsite(NotFound):
    status = 404
    code = 'NoSuchWebsiteConfiguration'


class NoSuchLifecycle(NotFound):
    status = 404
    code = 'NoSuchLifecycle'


class NoSuchCors(NotFound):
    status = 404
    code = 'NoSuchCORSConfiguration'


class NoSuchLiveChannel(NotFound):
    status = 404
    code = 'NoSuchLiveChannel'


class NoSuchBucketPolicy(NotFound):
    status = 404
    code = 'NoSuchBucketPolicy'


class NoSuchInventory(NotFound):
    status = 404
    code = 'NoSuchInventory'


class NoSuchReplicationRule(NotFound):
    status = 404
    code = 'NoSuchReplicationRule'


# --- 409 (conflict) family --------------------------------------------------

class Conflict(ServerError):
    status = 409
    code = ''


class BucketNotEmpty(Conflict):
    status = 409
    code = 'BucketNotEmpty'


class PositionNotEqualToLength(Conflict):
    status = 409
    code = 'PositionNotEqualToLength'

    def __init__(self, status, headers, body, details):
        super(PositionNotEqualToLength, self).__init__(status, headers, body, details)
        #: position at which the next append_object call must write
        self.next_position = int(headers[OSS_NEXT_APPEND_POSITION])


class ObjectNotAppendable(Conflict):
    status = 409
    code = 'ObjectNotAppendable'


class ChannelStillLive(Conflict):
    status = 409
    code = 'ChannelStillLive'


class LiveChannelDisabled(Conflict):
    status = 409
    code = 'LiveChannelDisabled'


# --- miscellaneous server errors --------------------------------------------

class PreconditionFailed(ServerError):
    status = 412
    code = 'PreconditionFailed'


class NotModified(ServerError):
    status = 304
    code = ''


class AccessDenied(ServerError):
    status = 403
    code = 'AccessDenied'


class NoSuchServerSideEncryptionRule(NotFound):
    status = 404
    code = 'NoSuchServerSideEncryptionRule'


class InvalidEncryptionAlgorithmError(ServerError):
    status = 400
    code = 'InvalidEncryptionAlgorithmError'


class SelectOperationFailed(ServerError):
    """A Select request failed; built from the Select frame stream, so it does
    not carry the usual headers/body and overrides __init__ accordingly."""
    code = 'SelectOperationFailed'

    def __init__(self, status, code, message):
        self.status = status
        self.code = code
        self.message = message

    def __str__(self):
        error = {'status': self.status,
                 'code': self.code,
                 'details': self.message}
        return str(error)


class SelectOperationClientError(OssError):
    """Client-side failure while processing a Select response."""

    def __init__(self, message, request_id):
        OssError.__init__(self, OSS_SELECT_CLIENT_ERROR_STATUS, {'x-oss-request-id': request_id}, 'SelectOperationClientError: ' + message, {})

    def __str__(self):
        error = {'x-oss-request-id': self.request_id,
                 'message': self.message}
        return str(error)


class SignatureDoesNotMatch(ServerError):
    status = 403
    code = 'SignatureDoesNotMatch'


class ObjectAlreadyExists(ServerError):
    status = 400
    code = 'ObjectAlreadyExists'


class PartNotSequential(ServerError):
    status = 400
    code = 'PartNotSequential'


class NoSuchWORMConfiguration(ServerError):
    status = 404
    code = 'NoSuchWORMConfiguration'


class WORMConfigurationLocked(ServerError):
    status = 403
    code = 'WORMConfigurationLocked'


class InvalidWORMConfiguration(ServerError):
    status = 400
    code = 'InvalidWORMConfiguration'


class NoSuchTransferAccelerationConfiguration(ServerError):
    status = 404
    code = 'NoSuchTransferAccelerationConfiguration'

def make_exception(resp):
    """Build the most specific exception class for an errored HTTP response.

    Reads up to 4KB of the response body. When the body is empty, some
    endpoints return the error as a base64-encoded ``x-oss-err`` header
    instead, so that is tried as a fallback.

    :param resp: response object with ``status``, ``headers`` and ``read()``
    :return: an instance of the registered subclass for (status, code), or a
        plain :class:`ServerError` when no specific class is registered.
    """
    status = resp.status
    headers = resp.headers
    body = resp.read(4096)
    if not body and headers.get('x-oss-err') is not None:
        try:
            value = base64.b64decode(to_string(headers.get('x-oss-err')))
        except Exception:
            # Fix: was a bare ``except:`` (also swallowing KeyboardInterrupt
            # etc.); a malformed header just falls back to the (empty) body.
            value = body
        details = _parse_error_body(value)
    else:
        details = _parse_error_body(body)
    code = details.get('Code', '')

    try:
        klass = _OSS_ERROR_TO_EXCEPTION[(status, code)]
        return klass(status, headers, body, details)
    except KeyError:
        return ServerError(status, headers, body, details)


def _walk_subclasses(klass):
for sub in klass.__subclasses__():
yield sub
for subsub in _walk_subclasses(sub):
yield subsub


# Register every ServerError subclass that declares both status and code.
for klass in _walk_subclasses(ServerError):
    status = getattr(klass, 'status', None)
    code = getattr(klass, 'code', None)

    if status is not None and code is not None:
        _OSS_ERROR_TO_EXCEPTION[(status, code)] = klass


# XML parsing exceptions have changed in Python2.7 and ElementTree 1.3
if hasattr(ElementTree, 'ParseError'):
    ElementTreeParseError = (ElementTree.ParseError, expat.ExpatError)
else:
    # Fix: was ``(expat.ExpatError)``, which is just the bare class, not a
    # tuple; use a real 1-tuple so both branches have the same shape.
    ElementTreeParseError = (expat.ExpatError,)


def _parse_error_body(body):
try:
root = ElementTree.fromstring(body)
if root.tag != 'Error':
return {}

details = {}
for child in root:
details[child.tag] = child.text
return details
except ElementTreeParseError:
return _guess_error_details(body)


def _guess_error_details(body):
    """Best-effort extraction of Code/Message from a malformed error body."""
    details = {}
    body = to_string(body)

    # Without an <Error> envelope there is nothing to guess from.
    if '<Error>' not in body or '</Error>' not in body:
        return details

    for tag in ('Code', 'Message'):
        match = re.search('<{0}>(.*)</{0}>'.format(tag), body)
        if match:
            details[tag] = match.group(1)

    return details

+ 85
- 0
osssdk/headers.py View File

@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
"""
oss2.headers
~~~~~~~~
这个模块包含http请求里header的key定义
同时包含了发送http请求的header, 类型为dict
"""
# Prefix of user-defined object metadata headers.
OSS_USER_METADATA_PREFIX = "x-oss-meta-"

OSS_CANNED_ACL = "x-oss-acl"

IF_UNMODIFIED_SINCE = "If-Unmodified-Since"
IF_MATCH = "If-Match"

OSS_COPY_OBJECT_SOURCE = "x-oss-copy-source"
OSS_COPY_OBJECT_SOURCE_RANGE = "x-oss-copy-source-range"

OSS_REQUEST_ID = "x-oss-request-id"

OSS_SECURITY_TOKEN = "x-oss-security-token"

OSS_NEXT_APPEND_POSITION = "x-oss-next-append-position"
OSS_HASH_CRC64_ECMA = "x-oss-hash-crc64ecma"
OSS_OBJECT_TYPE = "x-oss-object-type"

OSS_OBJECT_ACL = "x-oss-object-acl"

OSS_SYMLINK_TARGET = "x-oss-symlink-target"

# Server-side encryption headers.
# Fix: OSS_SERVER_SIDE_ENCRYPTION and OSS_SERVER_SIDE_ENCRYPTION_KEY_ID were
# each defined twice (with identical values); consolidated into one group.
OSS_SERVER_SIDE_ENCRYPTION = "x-oss-server-side-encryption"
OSS_SERVER_SIDE_ENCRYPTION_KEY_ID = "x-oss-server-side-encryption-key-id"
OSS_SERVER_SIDE_DATA_ENCRYPTION = "x-oss-server-side-data-encryption"

# Client-side encryption metadata headers.
OSS_CLIENT_SIDE_ENCRYPTION_KEY = "x-oss-meta-client-side-encryption-key"
OSS_CLIENT_SIDE_ENCRYPTION_START = "x-oss-meta-client-side-encryption-start"
OSS_CLIENT_SIDE_ENCRYPTION_CEK_ALG = "x-oss-meta-client-side-encryption-cek-alg"
OSS_CLIENT_SIDE_ENCRYPTION_WRAP_ALG = "x-oss-meta-client-side-encryption-wrap-alg"
# NOTE: 'ENCRYTPION' typo kept for backward compatibility with existing callers.
OSS_CLIENT_SIDE_ENCRYTPION_MATDESC = "x-oss-meta-client-side-encryption-matdesc"
OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_LENGTH = "x-oss-meta-client-side-encryption-unencrypted-content-length"
OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_MD5 = "x-oss-meta-client-side-encryption-unencrypted-content-md5"
OSS_CLIENT_SIDE_ENCRYPTION_DATA_SIZE = "x-oss-meta-client-side-encryption-data-size"
OSS_CLIENT_SIDE_ENCRYPTION_PART_SIZE = "x-oss-meta-client-side-encryption-part-size"

# Deprecated (old-style) client-side encryption headers, kept for reading
# objects written by earlier SDK versions.
DEPRECATED_CLIENT_SIDE_ENCRYPTION_KEY = "x-oss-meta-oss-crypto-key"
DEPRECATED_CLIENT_SIDE_ENCRYPTION_START = "x-oss-meta-oss-crypto-start"
DEPRECATED_CLIENT_SIDE_ENCRYPTION_CEK_ALG = "x-oss-meta-oss-cek-alg"
DEPRECATED_CLIENT_SIDE_ENCRYPTION_WRAP_ALG = "x-oss-meta-oss-wrap-alg"
DEPRECATED_CLIENT_SIDE_ENCRYTPION_MATDESC = "x-oss-meta-oss-crypto-matdesc"
DEPRECATED_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_LENGTH = "x-oss-meta-oss-crypto-unencrypted-content-length"
DEPRECATED_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_MD5 = "x-oss-meta-oss-crypto-unencrypted-content-md5"

OSS_OBJECT_TAGGING = "x-oss-tagging"
OSS_OBJECT_TAGGING_COPY_DIRECTIVE = "x-oss-tagging-directive"

OSS_REQUEST_PAYER = 'x-oss-request-payer'

OSS_TRAFFIC_LIMIT = 'x-oss-traffic-limit'

# Key-wrap algorithm identifiers for client-side encryption.
RSA_NONE_PKCS1Padding_WRAP_ALGORITHM = 'RSA/NONE/PKCS1Padding'
RSA_NONE_OAEPWithSHA1AndMGF1Padding = 'RSA/NONE/OAEPWithSHA-1AndMGF1Padding'
KMS_ALI_WRAP_ALGORITHM = 'KMS/ALICLOUD'
OSS_ENCRYPTION_CLIENT = 'OssEncryptionClient'
OSS_TASK_ID = 'x-oss-task-id'

OSS_METADATA_DIRECTIVE = 'x-oss-metadata-directive'

class RequestHeader(dict):
    """A dict of HTTP request headers with helpers for OSS-specific entries."""

    def __init__(self, *arg, **kw):
        super(RequestHeader, self).__init__(*arg, **kw)

    def set_server_side_encryption(self, algorithm=None, cmk_id=None):
        """Set the server-side encryption headers.

        *algorithm* may be "AES256" or "KMS"; any other value simply clears
        the encryption headers. *cmk_id* is only recorded for "KMS".
        """
        # Clear any previously-set encryption headers first.
        self.pop(OSS_SERVER_SIDE_ENCRYPTION, None)
        self.pop(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID, None)

        if algorithm == "AES256":
            self[OSS_SERVER_SIDE_ENCRYPTION] = "AES256"
        elif algorithm == "KMS":
            self[OSS_SERVER_SIDE_ENCRYPTION] = "KMS"
            if cmk_id is not None:
                self[OSS_SERVER_SIDE_ENCRYPTION_KEY_ID] = cmk_id

+ 151
- 0
osssdk/http.py View File

@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-

"""
oss2.http
~~~~~~~~

这个模块包含了HTTP Adapters。尽管OSS Python SDK内部使用requests库进行HTTP通信,但是对使用者是透明的。
该模块中的 `Session` 、 `Request` 、`Response` 对requests的对应的类做了简单的封装。
"""

import platform

import requests
from loguru import logger
from requests.structures import CaseInsensitiveDict

from . import __version__, defaults
from .compat import to_bytes
from .exceptions import RequestError
from .utils import file_object_remaining_bytes, SizedFileAdapter

# User-Agent header value: SDK version plus OS and Python interpreter info.
USER_AGENT = 'aliyun-sdk-python/{0}({1}/{2}/{3};{4})'.format(
    __version__, platform.system(), platform.release(), platform.machine(), platform.python_version())



class Session(object):
    """Requests belonging to one Session share a connection pool and, where
    possible, reuse HTTP connections."""

    def __init__(self, pool_size=None):
        self.session = requests.Session()

        # Mount adapters with an explicit pool size for both schemes.
        psize = pool_size or defaults.connection_pool_size
        self.session.mount('http://', requests.adapters.HTTPAdapter(pool_connections=psize, pool_maxsize=psize))
        self.session.mount('https://', requests.adapters.HTTPAdapter(pool_connections=psize, pool_maxsize=psize))

    def do_request(self, req, timeout):
        """Send *req* (a :class:`Request`) and wrap the reply in :class:`Response`.

        :raises RequestError: when the underlying requests call fails.
        """
        try:
            logger.debug("Send request, method: {0}, url: {1}, params: {2}, headers: {3}, timeout: {4}, proxies: {5}".format(
                req.method, req.url, req.params, req.headers, timeout, req.proxies))
            # stream=True so large bodies can be read incrementally by Response.
            return Response(self.session.request(req.method, req.url,
                                                 data=req.data,
                                                 params=req.params,
                                                 headers=req.headers,
                                                 stream=True,
                                                 timeout=timeout,
                                                 proxies=req.proxies))
        except requests.RequestException as e:
            raise RequestError(e)


class Request(object):
    """Holds all parameters of an outgoing HTTP request."""

    def __init__(self, method, url,
                 data=None,
                 params=None,
                 headers=None,
                 app_name='',
                 proxies=None,
                 region=None,
                 product=None,
                 cloudbox_id=None):
        self.method = method
        self.url = url
        # Normalize the body (see _convert_request_body for the rationale).
        self.data = _convert_request_body(data)
        self.params = params or {}
        self.proxies = proxies
        self.region = region
        self.product = product
        self.cloudbox_id = cloudbox_id

        if not isinstance(headers, CaseInsensitiveDict):
            self.headers = CaseInsensitiveDict(headers)
        else:
            self.headers = headers

        # tell requests not to add 'Accept-Encoding: gzip, deflate' by default
        if 'Accept-Encoding' not in self.headers:
            self.headers['Accept-Encoding'] = None

        # Append the app name (if any) to the default User-Agent.
        if 'User-Agent' not in self.headers:
            if app_name:
                self.headers['User-Agent'] = USER_AGENT + '/' + app_name
            else:
                self.headers['User-Agent'] = USER_AGENT

        logger.debug("Init request, method: {0}, url: {1}, params: {2}, headers: {3}".format(method, url, params,
                                                                                             headers))


# Chunk size (bytes) used when iterating over response content.
_CHUNK_SIZE = 8 * 1024


class Response(object):
    """A streaming wrapper over a ``requests`` response object."""

    def __init__(self, response):
        self.response = response
        self.status = response.status_code
        self.headers = response.headers
        self.request_id = response.headers.get('x-oss-request-id', '')

        # When a response contains no body, iter_content() cannot
        # be run twice (requests.exceptions.StreamConsumedError will be raised).
        # For details of the issue, please see issue #82
        #
        # To work around this issue, we simply return b'' when everything has been read.
        #
        # Note you cannot use self.response.raw.read() to implement self.read(), because
        # raw.read() does not uncompress response body when the encoding is gzip etc., and
        # we try to avoid depends on details of self.response.raw.
        self.__all_read = False

        logger.debug("Get response headers, req-id:{0}, status: {1}, headers: {2}".format(self.request_id, self.status,
                                                                                          self.headers))

    def read(self, amt=None):
        """Read up to *amt* bytes, or the whole remaining body when *amt* is None.

        Returns b'' once the body is exhausted (safe to call repeatedly).
        """
        if self.__all_read:
            return b''

        if amt is None:
            content_list = []
            for chunk in self.response.iter_content(_CHUNK_SIZE):
                content_list.append(chunk)
            content = b''.join(content_list)

            self.__all_read = True
            return content
        else:
            try:
                return next(self.response.iter_content(amt))
            except StopIteration:
                self.__all_read = True
                return b''

    def __iter__(self):
        return self.response.iter_content(_CHUNK_SIZE)


# For file objects that have a fileno() method, requests uses the file size
# implied by fileno() as Content-Length. That is wrong for file objects that
# have already been partially read or seek()'ed.
#
# _convert_request_body() therefore wraps file objects supporting seek() and
# tell() so that reading starts at the current position and only covers the
# bytes from there to end-of-file.
def _convert_request_body(data):
    data = to_bytes(data)

    # Strings/bytes (anything with a length) can be passed through unchanged.
    if hasattr(data, '__len__'):
        return data

    if hasattr(data, 'seek') and hasattr(data, 'tell'):
        return SizedFileAdapter(data, file_object_remaining_bytes(data))

    return data

+ 304
- 0
osssdk/iterators.py View File

@@ -0,0 +1,304 @@
# -*- coding: utf-8 -*-

"""
oss2.iterators
~~~~~~~~~~~~~~

该模块包含了一些易于使用的迭代器,可以用来遍历Bucket、文件、分片上传等。
"""

from .models import MultipartUploadInfo, SimplifiedObjectInfo
from .exceptions import ServerError

from . import defaults, http


class _BaseIterator(object):
    """Common machinery for the paginated listing iterators in this module.

    Subclasses implement :meth:`_fetch`, which loads one page of results into
    ``self.entries`` and returns ``(is_truncated, next_marker)``.
    """

    def __init__(self, marker, max_retries):
        self.is_truncated = True
        self.next_marker = marker

        max_retries = defaults.get(max_retries, defaults.request_retries)
        # Guard against zero/negative retry counts: always attempt at least once.
        self.max_retries = max_retries if max_retries > 0 else 1

        self.entries = []

    def _fetch(self):
        # Fix: was ``raise NotImplemented``, which raises the NotImplemented
        # *constant* — a TypeError in Python 3 — instead of an exception class.
        raise NotImplementedError  # pragma: no cover

    def __iter__(self):
        return self

    def __next__(self):
        while True:
            if self.entries:
                return self.entries.pop(0)

            if not self.is_truncated:
                raise StopIteration

            self.fetch_with_retry()

    def next(self):
        # Python 2 compatibility alias for __next__.
        return self.__next__()

    def fetch_with_retry(self):
        """Call :meth:`_fetch`, retrying up to ``max_retries`` times on HTTP 5xx."""
        for i in range(self.max_retries):
            try:
                self.is_truncated, self.next_marker = self._fetch()
            except ServerError as e:
                # Only 5xx responses are retryable; everything else propagates.
                if e.status // 100 != 5:
                    raise

                if i == self.max_retries - 1:
                    raise
            else:
                return


class BucketIterator(_BaseIterator):
    """Iterator over the user's buckets.

    Each iteration yields a :class:`SimplifiedBucketInfo <oss2.models.SimplifiedBucketInfo>`.

    :param service: a :class:`Service <oss2.Service>` instance
    :param prefix: only list buckets whose names start with this prefix
    :param marker: pagination marker; only buckets sorting after it are listed
    :param max_keys: max_keys passed to each `list_buckets` call. Note the
        iterator as a whole may yield more items than this value.
    """
    def __init__(self, service, prefix='', marker='', max_keys=100, max_retries=None):
        super(BucketIterator, self).__init__(marker, max_retries)
        self.service = service
        self.prefix = prefix
        self.max_keys = max_keys

    def _fetch(self):
        result = self.service.list_buckets(prefix=self.prefix,
                                           marker=self.next_marker,
                                           max_keys=self.max_keys)
        self.entries = result.buckets

        return result.is_truncated, result.next_marker


class ObjectIterator(_BaseIterator):
    """Iterator over the objects in a bucket.

    Each iteration yields a :class:`SimplifiedObjectInfo <oss2.models.SimplifiedObjectInfo>`.
    When `SimplifiedObjectInfo.is_prefix()` returns True, the entry is a
    common prefix (i.e. a "directory").

    :param bucket: a :class:`Bucket <oss2.Bucket>` instance
    :param prefix: only list objects whose names start with this prefix
    :param delimiter: directory delimiter
    :param marker: pagination marker
    :param max_keys: max_keys passed to each `list_objects` call. Note the
        iterator as a whole may yield more items than this value.

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)
    """
    def __init__(self, bucket, prefix='', delimiter='', marker='', max_keys=100, max_retries=None, headers=None):
        super(ObjectIterator, self).__init__(marker, max_retries)

        self.bucket = bucket
        self.prefix = prefix
        self.delimiter = delimiter
        self.max_keys = max_keys
        self.headers = http.CaseInsensitiveDict(headers)

    def _fetch(self):
        result = self.bucket.list_objects(prefix=self.prefix,
                                          delimiter=self.delimiter,
                                          marker=self.next_marker,
                                          max_keys=self.max_keys,
                                          headers=self.headers)
        # Merge objects and common prefixes into one key-ordered stream;
        # prefixes are wrapped as SimplifiedObjectInfo with only a key.
        self.entries = result.object_list + [SimplifiedObjectInfo(prefix, None, None, None, None, None)
                                             for prefix in result.prefix_list]
        self.entries.sort(key=lambda obj: obj.key)

        return result.is_truncated, result.next_marker

class ObjectIteratorV2(_BaseIterator):
    """Iterator over the objects in a bucket (ListObjectsV2).

    Each iteration yields a :class:`SimplifiedObjectInfo <oss2.models.SimplifiedObjectInfo>`.
    When `SimplifiedObjectInfo.is_prefix()` returns True, the entry is a
    common prefix (i.e. a "directory").

    :param str prefix: only list objects whose names start with this prefix
    :param str delimiter: delimiter, used to simulate directories
    :param str continuation_token: pagination token; pass '' for the first call,
        then the next_continuation_token from each result.
    :param str start_after: starting object name; OSS lists objects after it
        in lexicographic order.
    :param bool fetch_owner: whether to return object owner info (off by default).
    :param int max_keys: maximum entries per page (objects plus prefixes).

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)
    """

    def __init__(self, bucket, prefix='', delimiter='', continuation_token='', start_after='', fetch_owner = False, encoding_type = 'url', max_keys=100, max_retries=None, headers=None):
        super(ObjectIteratorV2, self).__init__(continuation_token, max_retries)

        self.bucket = bucket
        self.prefix = prefix
        self.delimiter = delimiter
        self.start_after = start_after
        self.fetch_owner = fetch_owner
        self.encoding_type = encoding_type
        self.max_keys = max_keys
        self.headers = http.CaseInsensitiveDict(headers)

    def _fetch(self):
        # next_marker doubles as the continuation token for the V2 listing.
        result = self.bucket.list_objects_v2(prefix=self.prefix,
                                             delimiter=self.delimiter,
                                             continuation_token=self.next_marker,
                                             start_after=self.start_after,
                                             fetch_owner=self.fetch_owner,
                                             encoding_type=self.encoding_type,
                                             max_keys=self.max_keys,
                                             headers=self.headers)
        # Merge objects and common prefixes into one key-ordered stream.
        self.entries = result.object_list + [SimplifiedObjectInfo(prefix, None, None, None, None, None)
                                             for prefix in result.prefix_list]
        self.entries.sort(key=lambda obj: obj.key)

        return result.is_truncated, result.next_continuation_token

class MultipartUploadIterator(_BaseIterator):
    """Iterator over a bucket's unfinished multipart uploads.

    Each iteration yields a :class:`MultipartUploadInfo <oss2.models.MultipartUploadInfo>`.
    When `MultipartUploadInfo.is_prefix()` returns True, the entry is a
    common prefix (i.e. a "directory").

    :param bucket: a :class:`Bucket <oss2.Bucket>` instance
    :param prefix: only list uploads for objects with this name prefix
    :param delimiter: directory delimiter
    :param key_marker: object-name pagination marker
    :param upload_id_marker: upload-ID pagination marker
    :param max_uploads: max_uploads passed to each `list_multipart_uploads`
        call. Note the iterator as a whole may yield more items than this value.

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)
    """
    def __init__(self, bucket,
                 prefix='', delimiter='', key_marker='', upload_id_marker='',
                 max_uploads=1000, max_retries=None, headers=None):
        super(MultipartUploadIterator, self).__init__(key_marker, max_retries)

        self.bucket = bucket
        self.prefix = prefix
        self.delimiter = delimiter
        self.next_upload_id_marker = upload_id_marker
        self.max_uploads = max_uploads
        self.headers = http.CaseInsensitiveDict(headers)

    def _fetch(self):
        result = self.bucket.list_multipart_uploads(prefix=self.prefix,
                                                    delimiter=self.delimiter,
                                                    key_marker=self.next_marker,
                                                    upload_id_marker=self.next_upload_id_marker,
                                                    max_uploads=self.max_uploads,
                                                    headers=self.headers)
        # Merge uploads and common prefixes into one key-ordered stream.
        self.entries = result.upload_list + [MultipartUploadInfo(prefix, None, None) for prefix in result.prefix_list]
        self.entries.sort(key=lambda u: u.key)

        # This listing paginates on (key, upload_id); track both markers.
        self.next_upload_id_marker = result.next_upload_id_marker
        return result.is_truncated, result.next_key_marker


class ObjectUploadIterator(_BaseIterator):
    """Iterator over all unfinished multipart uploads of one specific object.

    Each iteration yields a :class:`MultipartUploadInfo <oss2.models.MultipartUploadInfo>`.
    When `MultipartUploadInfo.is_prefix()` returns True, the entry is a
    common prefix (i.e. a "directory").

    :param bucket: a :class:`Bucket <oss2.Bucket>` instance
    :param key: object name
    :param max_uploads: max_uploads passed to each `list_multipart_uploads`
        call. Note the iterator as a whole may yield more items than this value.

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)
    """
    def __init__(self, bucket, key, max_uploads=1000, max_retries=None, headers=None):
        super(ObjectUploadIterator, self).__init__('', max_retries)
        self.bucket = bucket
        self.key = key
        self.next_upload_id_marker = ''
        self.max_uploads = max_uploads
        self.headers = http.CaseInsensitiveDict(headers)

    def _fetch(self):
        # The key itself is used as the listing prefix, so every returned
        # upload's key starts with self.key.
        result = self.bucket.list_multipart_uploads(prefix=self.key,
                                                    key_marker=self.next_marker,
                                                    upload_id_marker=self.next_upload_id_marker,
                                                    max_uploads=self.max_uploads,
                                                    headers=self.headers)

        # Keep only uploads for exactly this key (the prefix may match others).
        self.entries = [u for u in result.upload_list if u.key == self.key]
        self.next_upload_id_marker = result.next_upload_id_marker

        if not result.is_truncated or not self.entries:
            return False, result.next_key_marker

        # The listing has moved past our key, so later pages cannot match.
        if result.next_key_marker > self.key:
            return False, result.next_key_marker

        return result.is_truncated, result.next_key_marker


class PartIterator(_BaseIterator):
    """Iterator over the parts already uploaded in one multipart upload session.

    Each iteration yields a :class:`PartInfo <oss2.models.PartInfo>`.

    :param bucket: a :class:`Bucket <oss2.Bucket>` instance
    :param key: object name
    :param upload_id: multipart upload ID
    :param marker: pagination marker
    :param max_parts: max_parts passed to each `list_parts` call. Note the
        iterator as a whole may yield more items than this value.

    :param headers: HTTP headers
    :type headers: dict or oss2.CaseInsensitiveDict (recommended)
    """
    def __init__(self, bucket, key, upload_id,
                 marker='0', max_parts=1000, max_retries=None, headers=None):
        super(PartIterator, self).__init__(marker, max_retries)

        self.bucket = bucket
        self.key = key
        self.upload_id = upload_id
        self.max_parts = max_parts
        self.headers = http.CaseInsensitiveDict(headers)

    def _fetch(self):
        result = self.bucket.list_parts(self.key, self.upload_id,
                                        marker=self.next_marker,
                                        max_parts=self.max_parts,
                                        headers=self.headers)
        self.entries = result.parts

        return result.is_truncated, result.next_marker


class LiveChannelIterator(_BaseIterator):
    """Iterator over a bucket's live channels.

    Each iteration yields a :class:`LiveChannelInfo <oss2.models.LiveChannelInfo>`.

    :param bucket: a :class:`Bucket <oss2.Bucket>` instance
    :param prefix: only list channels whose names start with this prefix
    :param marker: pagination marker
    :param max_keys: max_keys passed to each `list_live_channel` call. Note
        the iterator as a whole may yield more items than this value.
    """
    def __init__(self, bucket, prefix='', marker='', max_keys=100, max_retries=None):
        super(LiveChannelIterator, self).__init__(marker, max_retries)

        self.bucket = bucket
        self.prefix = prefix
        self.max_keys = max_keys

    def _fetch(self):
        result = self.bucket.list_live_channel(prefix=self.prefix,
                                               marker=self.next_marker,
                                               max_keys=self.max_keys)
        self.entries = result.channels

        return result.is_truncated, result.next_marker


+ 2748
- 0
osssdk/models.py
File diff suppressed because it is too large
View File


+ 1212
- 0
osssdk/resumable.py
File diff suppressed because it is too large
View File


+ 27
- 0
osssdk/select_params.py View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
class SelectParameters(object):
    """Parameter-name constants for the Select API.

    Names are case-sensitive; see api.py for the meaning of each parameter.
    """
    CsvHeaderInfo = 'CsvHeaderInfo'
    CommentCharacter = 'CommentCharacter'
    RecordDelimiter = 'RecordDelimiter'
    OutputRecordDelimiter = 'OutputRecordDelimiter'
    FieldDelimiter = 'FieldDelimiter'
    OutputFieldDelimiter = 'OutputFieldDelimiter'
    QuoteCharacter = 'QuoteCharacter'
    SplitRange = 'SplitRange'
    LineRange = 'LineRange'
    CompressionType = 'CompressionType'
    KeepAllColumns = 'KeepAllColumns'
    OutputRawData = 'OutputRawData'
    EnablePayloadCrc = 'EnablePayloadCrc'
    OutputHeader = 'OutputHeader'
    SkipPartialDataRecord = 'SkipPartialDataRecord'
    MaxSkippedRecordsAllowed = 'MaxSkippedRecordsAllowed'
    # Fix: AllowQuotedRecordDelimiter was assigned twice (same value); the
    # redundant second assignment has been removed.
    AllowQuotedRecordDelimiter = 'AllowQuotedRecordDelimiter'
    Json_Type = 'Json_Type'
    ParseJsonNumberAsString = 'ParseJsonNumberAsString'
    OverwriteIfExists = 'OverwriteIfExists'

class SelectJsonTypes(object):  # Legal values for Json_Type in the Select JSON API; case sensitive.
    DOCUMENT = 'DOCUMENT'
    LINES = 'LINES'

+ 235
- 0
osssdk/select_response.py View File

@@ -0,0 +1,235 @@
import struct

from loguru import logger

from .exceptions import SelectOperationFailed
from .exceptions import SelectOperationClientError
from .exceptions import InconsistentError
from . import utils

"""
The adapter class for Select object's response.
The response consists of frames. Each frame has the following format:

Type | Payload Length | Header Checksum | Payload | Payload Checksum

|<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes--------->
And we have three kind of frames.
Data Frame:
Type:8388609
Payload: Offset | Data
<-8 bytes>

Continuous Frame
Type:8388612
Payload: Offset (8-bytes)

End Frame
Type:8388613
Payload: Offset | total scanned bytes | http status code | error message
         <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variable--->

"""
class SelectResponseAdapter(object):
    """Adapter over the framed response stream of a Select object request.

    Each frame is laid out as:

        Type(4) | Payload Length(4) | Header Checksum(4) | Payload | Payload Checksum(4)

    Data frames (8388609) carry an 8-byte file offset followed by data;
    continuous frames (8388612) carry only the offset; end (8388613) and the
    two meta-end frames (8388614 CSV / 8388615 JSON) carry offset, scanned
    size, an HTTP status and an optional "Code.Message" error string.
    Iterating over the adapter yields the data-frame payloads.
    """

    _CHUNK_SIZE = 8 * 1024
    _CONTINIOUS_FRAME_TYPE = 8388612
    _DATA_FRAME_TYPE = 8388609
    _END_FRAME_TYPE = 8388613
    _META_END_FRAME_TYPE = 8388614
    _JSON_META_END_FRAME_TYPE = 8388615
    _FRAMES_FOR_PROGRESS_UPDATE = 10

    def __init__(self, response, progress_callback=None, content_length=None, enable_crc=False):
        """
        :param response: HTTP response; must expose `headers` and iterate byte chunks
        :param progress_callback: optional callable(consumed_offset, total_length)
        :param content_length: total length forwarded to the progress callback
        :param enable_crc: verify the CRC32 of each data-frame payload
        """
        self.response = response
        self.frame_off_set = 0          # read cursor inside the current data frame
        self.frame_length = 0           # length of the current data-frame payload
        self.frame_data = b''
        self.check_sum_flag = 0
        self.file_offset = 0            # file offset reported by the last frame
        self.finished = 0
        self.raw_buffer = b''           # unconsumed tail of the last raw HTTP chunk
        self.raw_buffer_offset = 0
        self.callback = progress_callback
        self.frames_since_last_progress_report = 0
        self.content_length = content_length
        self.resp_content_iter = response.__iter__()
        self.enable_crc = enable_crc
        self.payload = b''
        # When the server returns raw output, the stream is not framed at all.
        self.output_raw_data = response.headers.get("x-oss-select-output-raw", '') == "true"
        self.request_id = response.headers.get("x-oss-request-id", '')
        self.splits = 0                 # filled in by meta-end frames
        self.rows = 0
        self.columns = 0

    def read(self):
        """Drain the remaining stream and return it as one bytes object."""
        if self.finished:
            return b''
        content = b''
        for data in self:
            content += data
        return content

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return the next chunk of select output; raise StopIteration at end."""
        if self.output_raw_data == True:
            data = next(self.resp_content_iter)
            if len(data) != 0:
                return data
            else:
                raise StopIteration

        while self.finished == 0:
            if self.frame_off_set < self.frame_length:
                data = self.frame_data[self.frame_off_set:self.frame_length]
                self.frame_length = self.frame_off_set = 0
                return data
            else:
                self.read_next_frame()
                self.frames_since_last_progress_report += 1
                if (self.frames_since_last_progress_report >= SelectResponseAdapter._FRAMES_FOR_PROGRESS_UPDATE
                        and self.callback is not None):
                    self.callback(self.file_offset, self.content_length)
                    self.frames_since_last_progress_report = 0
        raise StopIteration

    def read_raw(self, amt):
        """Read up to `amt` bytes from the underlying HTTP stream.

        May return fewer bytes only when the stream ends.
        """
        ret = b''
        read_count = 0
        while amt > 0 and self.finished == 0:
            size = len(self.raw_buffer)
            if size == 0:
                self.raw_buffer = next(self.resp_content_iter)
                self.raw_buffer_offset = 0
                size = len(self.raw_buffer)
                if size == 0:
                    break

            if size - self.raw_buffer_offset >= amt:
                data = self.raw_buffer[self.raw_buffer_offset:self.raw_buffer_offset + amt]
                data_size = len(data)
                self.raw_buffer_offset += data_size
                ret += data
                read_count += data_size
                amt -= data_size
            else:
                data = self.raw_buffer[self.raw_buffer_offset:]
                data_len = len(data)
                ret += data
                read_count += data_len
                amt -= data_len
                self.raw_buffer = b''
        return ret

    def read_next_frame(self):
        """Decode one frame (header + payload), updating adapter state.

        :raises SelectOperationClientError: unknown frame type
        :raises InconsistentError: CRC mismatch on a data frame (when enable_crc)
        :raises SelectOperationFailed: server reported a non-2xx status
        """
        frame_type = bytearray(self.read_raw(4))
        payload_length = bytearray(self.read_raw(4))
        utils.change_endianness_if_needed(payload_length)  # convert to little endian
        payload_length_val = struct.unpack("I", bytes(payload_length))[0]
        header_checksum = bytearray(self.read_raw(4))

        frame_type[0] = 0  # mask the version bit
        utils.change_endianness_if_needed(frame_type)  # convert to little endian
        frame_type_val = struct.unpack("I", bytes(frame_type))[0]
        if (frame_type_val != SelectResponseAdapter._DATA_FRAME_TYPE and
                frame_type_val != SelectResponseAdapter._CONTINIOUS_FRAME_TYPE and
                frame_type_val != SelectResponseAdapter._END_FRAME_TYPE and
                frame_type_val != SelectResponseAdapter._META_END_FRAME_TYPE and
                frame_type_val != SelectResponseAdapter._JSON_META_END_FRAME_TYPE):
            logger.warning("Unexpected frame type: {0}. RequestId:{1}. This could be due to the old version of client.".format(frame_type_val, self.request_id))
            raise SelectOperationClientError(self.request_id, "Unexpected frame type:" + str(frame_type_val))

        self.payload = self.read_raw(payload_length_val)
        file_offset_bytes = bytearray(self.payload[0:8])
        utils.change_endianness_if_needed(file_offset_bytes)
        self.file_offset = struct.unpack("Q", bytes(file_offset_bytes))[0]
        if frame_type_val == SelectResponseAdapter._DATA_FRAME_TYPE:
            self.frame_length = payload_length_val - 8
            self.frame_off_set = 0
            self.check_sum_flag = 1
            self.frame_data = self.payload[8:]
            checksum = bytearray(self.read_raw(4))  # read checksum crc32
            utils.change_endianness_if_needed(checksum)
            checksum_val = struct.unpack("I", bytes(checksum))[0]
            if self.enable_crc:
                crc32 = utils.Crc32()
                crc32.update(self.payload)
                checksum_calc = crc32.crc
                if checksum_val != checksum_calc:
                    logger.warning("Incorrect checksum: Actual {0} and calculated {1}. RequestId:{2}".format(checksum_val, checksum_calc, self.request_id))
                    raise InconsistentError("Incorrect checksum: Actual" + str(checksum_val) + ". Calculated:" + str(checksum_calc), self.request_id)
        elif frame_type_val == SelectResponseAdapter._CONTINIOUS_FRAME_TYPE:
            self.frame_length = self.frame_off_set = 0
            self.check_sum_flag = 1
            self.read_raw(4)  # discard the payload checksum
        elif frame_type_val == SelectResponseAdapter._END_FRAME_TYPE:
            self.frame_off_set = 0
            scanned_size_bytes = bytearray(self.payload[8:16])
            status_bytes = bytearray(self.payload[16:20])
            utils.change_endianness_if_needed(status_bytes)
            status = struct.unpack("I", bytes(status_bytes))[0]
            error_msg_size = payload_length_val - 20
            error_msg = b''
            error_code = b''
            if error_msg_size > 0:
                # Error string is "Code.Message"; split on the first dot.
                error_msg = self.payload[20:error_msg_size + 20]
                error_code_index = error_msg.find(b'.')
                if error_code_index >= 0 and error_code_index < error_msg_size - 1:
                    error_code = error_msg[0:error_code_index]
                    error_msg = error_msg[error_code_index + 1:]

            if status // 100 != 2:
                raise SelectOperationFailed(status, error_code, error_msg)
            self.frame_length = 0
            if self.callback is not None:
                self.callback(self.file_offset, self.content_length)
            self.read_raw(4)  # read the payload checksum
            self.frame_length = 0
            self.finished = 1
        elif frame_type_val == SelectResponseAdapter._META_END_FRAME_TYPE or frame_type_val == SelectResponseAdapter._JSON_META_END_FRAME_TYPE:
            self.frame_off_set = 0
            scanned_size_bytes = bytearray(self.payload[8:16])
            status_bytes = bytearray(self.payload[16:20])
            utils.change_endianness_if_needed(status_bytes)
            status = struct.unpack("I", bytes(status_bytes))[0]
            splits_bytes = bytearray(self.payload[20:24])
            utils.change_endianness_if_needed(splits_bytes)
            self.splits = struct.unpack("I", bytes(splits_bytes))[0]
            lines_bytes = bytearray(self.payload[24:32])
            utils.change_endianness_if_needed(lines_bytes)
            self.rows = struct.unpack("Q", bytes(lines_bytes))[0]

            # CSV meta-end carries a 4-byte column count; JSON meta-end does not.
            error_index = 36
            if frame_type_val == SelectResponseAdapter._META_END_FRAME_TYPE:
                column_bytes = bytearray(self.payload[32:36])
                utils.change_endianness_if_needed(column_bytes)
                self.columns = struct.unpack("I", bytes(column_bytes))[0]
            else:
                error_index = 32
            error_size = payload_length_val - error_index
            error_msg = b''
            error_code = b''
            if (error_size > 0):
                error_msg = self.payload[error_index:error_index + error_size]
                error_code_index = error_msg.find(b'.')
                if error_code_index >= 0 and error_code_index < error_size - 1:
                    error_code = error_msg[0:error_code_index]
                    error_msg = error_msg[error_code_index + 1:]

            self.read_raw(4)  # read the payload checksum
            self.final_status = status
            self.frame_length = 0
            self.finished = 1
            # BUGFIX: was `status / 100 != 2` — true division on Python 3 makes
            # every 2xx status other than 200 (e.g. 206) compare unequal to 2
            # and raise spuriously; use floor division like the END-frame branch.
            if (status // 100 != 2):
                raise SelectOperationFailed(status, error_code, error_msg)


+ 90
- 0
osssdk/task_queue.py View File

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-

import threading
import sys

from loguru import logger

try:
import Queue as queue
except ImportError:
import queue

import traceback


class TaskQueue(object):
    """One-producer / many-consumer task pump built on an unbounded queue.

    The producer and every consumer run in daemon threads; the first
    exception raised by any of them is recorded and re-raised from run().
    """
    def __init__(self, producer, consumers):
        # producer: callable(task_queue) that feeds items via put()
        # consumers: list of callables(task_queue) that drain items via get()
        self.__producer = producer
        self.__consumers = consumers

        self.__threads = []

        # must be an infinite queue, otherwise producer may be blocked after all consumers being dead.
        self.__queue = queue.Queue()

        self.__lock = threading.Lock()
        self.__exc_info = None   # sys.exc_info() of the first failure, if any
        self.__exc_stack = ''    # formatted traceback of that failure

    def run(self):
        """Start producer and consumers; block until all threads finish.

        Joins with 1s timeouts so KeyboardInterrupt can still be delivered
        to the main thread. Re-raises the first captured exception, if any.
        """
        self.__add_and_run(threading.Thread(target=self.__producer_func))

        for c in self.__consumers:
            self.__add_and_run(threading.Thread(target=self.__consumer_func, args=(c,)))

        # give KeyboardInterrupt chances to happen by joining with timeouts.
        while self.__any_active():
            for t in self.__threads:
                t.join(1)

        if self.__exc_info:
            logger.error('An exception was thrown by producer or consumer, backtrace: {0}'.format(self.__exc_stack))
            raise self.__exc_info[1]

    def put(self, data):
        """Enqueue a work item; None is reserved as the end-of-stream marker."""
        assert data is not None
        self.__queue.put(data)

    def get(self):
        """Dequeue the next work item; returns None when the stream has ended."""
        return self.__queue.get()

    def ok(self):
        """True while no producer/consumer exception has been recorded."""
        with self.__lock:
            return self.__exc_info is None

    def __add_and_run(self, thread):
        thread.daemon = True
        thread.start()
        self.__threads.append(thread)

    def __any_active(self):
        return any(t.is_alive() for t in self.__threads)

    def __producer_func(self):
        # Bare except is deliberate: capture any failure via sys.exc_info and
        # still emit the end markers so consumers can exit.
        try:
            self.__producer(self)
        except:
            self.__on_exception(sys.exc_info())
            self.__put_end()
        else:
            self.__put_end()

    def __consumer_func(self, consumer):
        try:
            consumer(self)
        except:
            self.__on_exception(sys.exc_info())

    def __put_end(self):
        # One None marker per consumer so each of them wakes up and stops.
        for i in range(len(self.__consumers)):
            self.__queue.put(None)

    def __on_exception(self, exc_info):
        # Only the first exception is kept.
        with self.__lock:
            if self.__exc_info is None:
                self.__exc_info = exc_info
                self.__exc_stack = traceback.format_exc()




+ 1096
- 0
osssdk/utils.py
File diff suppressed because it is too large
View File


+ 2064
- 0
osssdk/xml_utils.py
File diff suppressed because it is too large
View File


+ 29
- 0
pojo/Result.py View File

@@ -0,0 +1,29 @@
from enums.StatusEnum import StatusType, UploadTaskStatusType
from util.QueUtil import put_queue
from util.TimeUtils import now_date_to_str


def push_result(fb_queue, errorCode="", errorMsg="", status=StatusType.RUNNING.value[0]):
    """Enqueue a 'stream' feedback message with the given status onto fb_queue."""
    payload = {
        "errorCode": errorCode,
        "errorMsg": errorMsg,
        "status": status,
        "currentTime": now_date_to_str()
    }
    put_queue(fb_queue, ('stream', payload))


def upload_result(fb_queue, requestId, errorCode="", errorMsg="", status=UploadTaskStatusType.RUNNING.value[0],
                  imageList=None, videoList=None):
    """Enqueue an 'upload' feedback message with per-file progress lists.

    BUGFIX: the defaults were mutable (`imageList=[]`), so every no-arg call
    shared (and could leak mutations across) the same list objects. The
    defaults are now None and replaced with a fresh empty list per call;
    callers passing their own lists are unaffected.
    """
    put_queue(fb_queue, ('upload',
                         {
                             "requestId": requestId,
                             "errorCode": errorCode,
                             "errorMsg": errorMsg,
                             "status": status,
                             "imageList": [] if imageList is None else imageList,
                             "videoList": [] if videoList is None else videoList,
                             "currentTime": now_date_to_str()
                         })
              )

+ 0
- 0
pojo/__init__.py View File


+ 1
- 1
service/FeedbackThread.py View File

@@ -27,7 +27,7 @@ class FeedbackThread(Thread):
def run(self):
logger.info("启动反馈线程")
while True:
logger.info("反馈发送消息循环")
# logger.info("反馈发送消息循环")
try:
fb = self.getFeedback()
if fb is not None and len(fb) > 0:

+ 35
- 59
service/PushStreamThread.py View File

@@ -9,10 +9,10 @@ from loguru import logger
from enums.ExceptionEnum import ExceptionType
from enums.StatusEnum import StatusType
from exception.CustomerException import ServiceException
from pojo.Result import push_result
from util.PushStreamUtils import PushStreamUtil
from util.QueUtil import put_queue, get_no_block_queue
from util.RWUtils import getConfigs
from util.TimeUtils import now_date_to_str

'''
推流线程
@@ -29,18 +29,12 @@ class PushStreamThread(Thread):
self.__event = Queue()
self.__hb_status = StatusType.WAITTING.value[0]
application_config = getConfigs(base_dir, 'config/application.json')
push_stream_tool = PushStreamUtil(application_config.get("pullUrl"), application_config.get("pushUrl"))
if pullUrl is not None and len(pullUrl) > 0 and pushUrl is not None and len(pushUrl) > 0:
push_stream_tool.set_url(pullUrl, pushUrl)
self.__push_stream_tool = push_stream_tool
put_queue(self.__fb_queue, {
"errorCode": "",
"errorMsg": "",
"status": StatusType.WAITTING.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
self.__push_stream_tool = PushStreamUtil(application_config.get("pullUrl"), application_config.get("pushUrl"))
self.__push_stream_tool.set_url(pullUrl, pushUrl)
push_result(self.__fb_queue, status=StatusType.WAITTING.value[0])

def send_event(self, result):
put_queue(self.__event, result, is_throw_ex=False)
put_queue(self.__event, result)

def push_stream(self, push_queue):
logger.info("开始启动推流线程!")
@@ -52,22 +46,22 @@ class PushStreamThread(Thread):
logger.warning("推流异常,请检测拉流地址和推流地址是否正常!")
if self.__push_stream_tool.push_stream_sp.returncode != 0:
logger.error("推流异常:{}", err.decode())
put_queue(push_queue, (2, StatusType.RETRYING.value[0]), is_throw_ex=False)
put_queue(push_queue, (2, StatusType.RETRYING.value[0]))
self.__push_stream_tool.close_push_stream_p()
time.sleep(1)
if not self.__push_stream_tool.status:
self.__push_stream_tool.close_push_stream_p()
put_queue(push_queue, (0,), is_throw_ex=False)
put_queue(push_queue, (0,))
break
except ServiceException as s:
logger.error("{}", s.msg)
self.__push_stream_tool.close_push_stream_p()
put_queue(push_queue, (1, s), is_throw_ex=False)
put_queue(push_queue, (1, s))
break
except Exception as e:
logger.error("异常:{}", format_exc())
self.__push_stream_tool.close_push_stream_p()
put_queue(push_queue, (1, e), is_throw_ex=False)
put_queue(push_queue, (1, e))
break
logger.info("推流线程运行结束!")

@@ -77,14 +71,13 @@ class PushStreamThread(Thread):
push = Thread(target=self.push_stream, args=(push_queue,))
push.setDaemon(True)
push.start()
count = 0
start_time = time.time()
while True:
try:
count, start_time, ex = 0, time.time(), None
try:
while True:
if self.__push_stream_tool.status and not push.is_alive():
logger.error("检测到推流线程异常停止!")
raise Exception("检测到推流线程异常停止!")
push_result = get_no_block_queue(push_queue)
ph_result = get_no_block_queue(push_queue)
event_result = get_no_block_queue(self.__event)
if event_result is not None:
command = event_result.get("command")
@@ -92,56 +85,39 @@ class PushStreamThread(Thread):
self.__hb_status = StatusType.STOPPING.value[0]
self.__push_stream_tool.status = False
self.__push_stream_tool.close_push_stream_p()
if push_result is not None and push_result[0] == 0:
if ph_result is not None and ph_result[0] == 0:
logger.info("推流任务停止中")
push.join(timeout=120)
put_queue(self.__fb_queue, {
"errorCode": "",
"errorMsg": "",
"status": StatusType.SUCCESS.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
push.join(timeout=60)
push_result(self.__fb_queue, status=StatusType.SUCCESS.value[0])
break
if push_result is not None and push_result[0] == 1:
if ph_result is not None and ph_result[0] == 1:
logger.info("推流任务异常停止中")
push.join(timeout=120)
raise push_result[1]
if push_result is not None and push_result[0] == 2:
if StatusType.RETRYING.value[0] == push_result[1]:
push.join(timeout=60)
raise ph_result[1]
if ph_result is not None and ph_result[0] == 2:
if StatusType.RETRYING.value[0] == ph_result[1]:
self.__hb_status = StatusType.RETRYING.value[0]
start_time = time.time()
if time.time() - start_time > 20:
self.__hb_status = StatusType.RUNNING.value[0]
count += 1
if count % 10 == 0:
put_queue(self.__fb_queue, {
"errorCode": "",
"errorMsg": "",
"status": self.__hb_status,
"current_time": now_date_to_str()}, is_throw_ex=False)
push_result(self.__fb_queue, status=self.__hb_status)
count = 0
time.sleep(1)
except ServiceException as s:
logger.error("推流异常, code: {}, msg: {}", s.code, s.msg)
self.__push_stream_tool.status = False
self.__push_stream_tool.close_push_stream_p()
push.join(timeout=120)
put_queue(self.__fb_queue, {
"errorCode": s.code,
"errorMsg": s.msg,
"status": StatusType.FAILED.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
break
except Exception:
logger.error("推流异常:{}", format_exc())
self.__push_stream_tool.status = False
self.__push_stream_tool.close_push_stream_p()
push.join(timeout=120)
put_queue(self.__fb_queue, {
"errorCode": ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
"errorMsg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
"status": StatusType.FAILED.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
break
except ServiceException as s:
logger.error("推流异常, code: {}, msg: {}", s.code, s.msg)
ex = s.code, s.msg
except Exception:
logger.error("推流异常:{}", format_exc())
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
self.__push_stream_tool.status = False
self.__push_stream_tool.close_push_stream_p()
push.join(timeout=60)
if ex:
code, msg = ex
push_result(self.__fb_queue, code, msg, status=StatusType.FAILED.value[0])
logger.info("推流检测线程执行完成")

# """

+ 82
- 40
service/Service.py View File

@@ -1,4 +1,6 @@
from queue import Queue
# -*- coding: utf-8 -*-
import queue
from multiprocessing import Queue
from time import sleep, time
from traceback import format_exc

@@ -6,37 +8,44 @@ from cerberus import Validator
from loguru import logger

from enums.ExceptionEnum import ExceptionType
from enums.StatusEnum import StatusType
from enums.StatusEnum import StatusType, UploadTaskStatusType
from exception.CustomerException import ServiceException
from pojo.Result import push_result, upload_result
from service.FeedbackThread import FeedbackThread
from service.PushStreamThread import PushStreamThread
from service.UploadFileProcess import UploadFileProcess
from util.MqttUtil import MqttClient
from util.QueUtil import get_no_block_queue, put_queue
from util.TimeUtils import now_date_to_str
from util.QueUtil import get_no_block_queue


class DispatcherService:
__slots__ = ("__base_dir", "__msg_queue", "__fb_queue", "__feedbackThread", "__task")
__slots__ = ("__base_dir", "__msg_queue", "__fb_queue", "__feedbackThread", "__task", "__task_upload",
'__handle_method')

def __init__(self, base_dir):
self.__base_dir = base_dir
self.__msg_queue = Queue()
self.__msg_queue = queue.Queue()
self.__fb_queue = Queue()
self.__feedbackThread = None
self.__task = None
self.__task_upload = None
self.__handle_method = {
"stream": lambda x: self.handle_message(x),
"upload": lambda x: self.handle_upload(x),
}
self.start_service()

def start_service(self):
mq = MqttClient(self.__base_dir, self.__msg_queue, self.__fb_queue)
mq.start()
sleep(1)
retry_count = 0
start_time = time()

retry_count, start_time = 0, time()
while True:
try:
if self.__task is not None and not self.__task.is_alive():
self.__task = None
if self.__task_upload is not None and not self.__task_upload.is_alive():
self.__task_upload = None
retry_count, start_time = self.start_feedback_thread(mq, retry_count, start_time)
if not mq.client.is_connected():
logger.info("mqtt重连中")
@@ -47,32 +56,24 @@ class DispatcherService:
# 订阅消息处理
message = get_no_block_queue(self.__msg_queue)
if message is not None and len(message) > 0:
self.handle_message(message)
self.__handle_method.get(message[0])(message[1])
else:
sleep(1)
except Exception:
logger.error("推流服务异常: {}", format_exc())
logger.error("服务异常: {}", format_exc())

def handle_message(self, message):
try:
self.check_msg(message)
self.check_msg(SCHEMA, message)
command = message.get("command")
if 'start' == command:
if self.__task:
logger.warning("推流任务已存在!!!")
put_queue(self.__fb_queue, {
"errorCode": ExceptionType.PUSH_STREAM_TASK_IS_AREADLY.value[0],
"errorMsg": ExceptionType.PUSH_STREAM_TASK_IS_AREADLY.value[1],
"status": StatusType.RUNNING.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
push_result(self.__fb_queue, ExceptionType.PUSH_STREAM_TASK_IS_AREADLY.value[0],
ExceptionType.PUSH_STREAM_TASK_IS_AREADLY.value[1])
return
pullUrl = message.get("pullUrl")
pushUrl = message.get("pushUrl")
put_queue(self.__fb_queue, {
"errorCode": "",
"errorMsg": "",
"status": StatusType.INIT.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
pullUrl, pushUrl = message.get("pullUrl"), message.get("pushUrl")
push_result(self.__fb_queue, status=StatusType.INIT.value[0])
p_thread = PushStreamThread(self.__fb_queue, self.__base_dir, pullUrl, pushUrl)
p_thread.setDaemon(True)
p_thread.start()
@@ -80,30 +81,55 @@ class DispatcherService:
if 'stop' == command:
if self.__task is None:
logger.warning("推流任务不存在, 任务无法停止!")
put_queue(self.__fb_queue, {
"errorCode": ExceptionType.PUSH_STREAM_TASK_IS_NOT_AREADLY.value[0],
"errorMsg": ExceptionType.PUSH_STREAM_TASK_IS_NOT_AREADLY.value[1],
"status": StatusType.SUCCESS.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
push_result(self.__fb_queue, ExceptionType.PUSH_STREAM_TASK_IS_NOT_AREADLY.value[0],
ExceptionType.PUSH_STREAM_TASK_IS_NOT_AREADLY.value[1], StatusType.SUCCESS.value[0])
return
self.__task.send_event({"command": "stop"})
except ServiceException as s:
put_queue(self.__fb_queue, {
"errorCode": s.code,
"errorMsg": s.msg,
"status": StatusType.FAILED.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
push_result(self.__fb_queue, s.code, s.msg, StatusType.FAILED.value[0])
except Exception:
logger.error("消息处理异常: {}", format_exc())
put_queue(self.__fb_queue, {"errorCode": ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
"errorMsg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
"status": StatusType.FAILED.value[0],
"current_time": now_date_to_str()}, is_throw_ex=False)
push_result(self.__fb_queue, ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], StatusType.FAILED.value[0])
finally:
del message

def handle_upload(self, message):
try:
self.check_msg(UPLOAD_SCHEMA, message)
command = message.get("command")
if 'start' == command:
if self.__task_upload:
logger.warning("上传任务已存在!!!")
upload_result(self.__fb_queue, message.get("requestId"),
errorCode=ExceptionType.UPLOAD_TASK_IS_AREADLY.value[0],
errorMsg=ExceptionType.UPLOAD_TASK_IS_AREADLY.value[1],
status=UploadTaskStatusType.FAILED.value[0])
return
upload_p = UploadFileProcess(self.__fb_queue, self.__base_dir, message.get("requestId"))
upload_p.start()
self.__task_upload = upload_p
except ServiceException as s:
logger.error("文件上传请求异常: {}", s.msg)
if message.get("requestId"):
upload_result(self.__fb_queue, message.get("requestId"),
errorCode=s.code,
errorMsg=s.msg,
status=UploadTaskStatusType.FAILED.value[0])
except Exception:
logger.error("消息处理异常: {}", format_exc())
if message.get("requestId"):
upload_result(self.__fb_queue, message.get("requestId"),
errorCode=ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
errorMsg=ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
status=UploadTaskStatusType.FAILED.value[0])
finally:
del message

@staticmethod
def check_msg(msg):
def check_msg(schema, msg):
try:
v = Validator(SCHEMA, allow_unknown=True)
v = Validator(schema, allow_unknown=True)
result = v.validate(msg)
if not result:
logger.error("参数校验异常: {}", v.errors)
@@ -163,3 +189,19 @@ SCHEMA = {
'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$'
}
}

UPLOAD_SCHEMA = {
"requestId": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
}
}

+ 269
- 0
service/UploadFileProcess.py View File

@@ -0,0 +1,269 @@
# -*- coding: utf-8 -*-
from concurrent.futures import wait, ALL_COMPLETED, ThreadPoolExecutor
from multiprocessing import Process
from os.path import join
from threading import Thread
from time import sleep, time
from traceback import format_exc

from loguru import logger

from enums.ExceptionEnum import ExceptionType
from enums.StatusEnum import UploadTaskStatusType, UploadStatusType
from exception.CustomerException import ServiceException
from pojo.Result import upload_result
from util.AliyunUtil import OssUtil, VodUtil
from util.FileUtil import get_all_images, get_all_videos, get_file_name, delete_file, create_dir, \
move_file, remove_file, remove_after_create
from util.LogUtils import init_log
from util.RWUtils import getConfigs
from util.ThreadUtil import ThreadPoolExecutorNew
from util.TimeUtils import now_date_to_str, YMDHMS

'''
视频上传
'''


class UploadFileProcess(Process):
    """Background process that uploads local image/video files to Aliyun OSS/VOD."""
    # NOTE(review): '__current_time' is assigned in __init__ but missing from
    # __slots__; this only works because the Process base class supplies a
    # __dict__, which also makes __slots__ ineffective here — confirm intent.
    __slots__ = ('__fb_queue', '__base_dir', '__requestId', 'videoPath', 'imagePath', 'image_backup', 'video_backup',
                 'run_status', 'hd_status')

    def __init__(self, fbQueue, base_dir, requestId):
        super().__init__()
        self.__fb_queue = fbQueue      # feedback queue shared with the dispatcher
        self.__base_dir = base_dir     # project base directory (config/log paths)
        self.__requestId = requestId   # request id echoed in every feedback message
        self.__current_time = now_date_to_str(YMDHMS)
        config = getConfigs(self.__base_dir, "config/application.json")
        # Backup root is timestamped per task run.
        backup = join(config.get('backup'), self.__current_time)
        self.videoPath, self.imagePath = config.get('videoPath'), config.get('imagePath')
        self.image_backup, self.video_backup = join(backup, 'images'), join(backup, 'videos')
        if self.videoPath is None or self.imagePath is None or config.get('backup') is None:
            raise Exception("application.json中路径配置为空!")
        self.run_status = UploadTaskStatusType.WAITING.value[0]  # status reported by the heartbeat
        self.hd_status = True                                    # heartbeat keep-alive flag

    @staticmethod
    def uploadImage(oss, uploadPath, filePath):
        """Upload one image via OSS resumable upload; record and re-raise failures."""
        try:
            oss.resumable_upload(uploadPath, filePath)
        except Exception as e:
            oss.exception = e  # also surface the failure on the OssUtil instance
            logger.error("oss上传文件异常: {}, uploadPath:{}, filePath:{}", format_exc(), uploadPath, filePath)
            raise e

    @staticmethod
    def uploadVideo(vod, file_title, filePath):
        """Upload one video via VOD and resolve its play URL; record and re-raise failures.

        NOTE(review): the log text says "oss" although this path goes through
        VOD — runtime message kept as-is.
        """
        try:
            vod.get_play_url(filePath, file_title)
        except Exception as e:
            vod.exception = e  # also surface the failure on the VodUtil instance
            logger.error("oss上传文件异常: {}, file_title:{}, filePath:{}", format_exc(), file_title, filePath)
            raise e

    @staticmethod
    def updateStatus(plist, pTask, backup, t):
        """Mark unfinished entries failed, stop the pool, and move their files to backup.

        :param plist: per-file status dicts; status < 15 means not yet terminal
        :param pTask: fileName -> [future, uploader, sourcePath] mapping
        :param backup: backup directory for failed files
        :param t: thread pool to shut down without waiting (may be falsy)
        """
        for i in plist:
            if i["status"] < 15:
                i["status"] = UploadStatusType.FAILED.value[0]
                itk = pTask.get(i.get("fileName"))
                if itk:
                    if t:
                        t.shutdown(wait=False)
                    create_dir(backup)
                    move_file(itk[2], backup)

    @staticmethod
    def move_method(file, backup):
        """Thin wrapper over move_file so it can be submitted to an executor."""
        move_file(file, backup)

    def hdThrad(self, imageList, videoList):
        """Heartbeat loop: report upload progress every ~4s until hd_status is cleared.

        Reads imageList/videoList live (the uploader mutates them concurrently).
        Gives up after 12 hours (43200s) as a safety valve.
        """
        try:
            requestId = self.__requestId
            fb_queue = self.__fb_queue
            logger.info("启动文件上传心跳线程, requestId:{}", requestId)
            start_time = time()
            while self.hd_status:
                if time() - start_time > 43200:
                    logger.error("心跳线程运行超时!!!!requestId:{}", requestId)
                    break
                upload_result(fb_queue, requestId, status=self.run_status, imageList=imageList, videoList=videoList)
                sleep(4)
        except Exception:
            logger.error("心跳线程异常:{}, requestId:{}", format_exc(), requestId)
        logger.info("心跳线程停止完成!requestId:{}", requestId)

    def upload_method(self, video_path_array, videoTask, videoList, vt, image_path_array, imageTask, imageList, it):
        """Submit one upload task per video/image file and register a tracking entry.

        Mutates videoTask/videoList and imageTask/imageList in place; the
        heartbeat thread reads the same list objects concurrently.
        """
        for i in video_path_array:
            vod = VodUtil(self.__base_dir)
            filename = get_file_name(i)
            video_result = vt.submit(self.uploadVideo, vod, filename, i)
            videoTask[filename] = [video_result, vod, i]
            videoList.append({"fileName": filename, "videoUrl": "", "status": UploadStatusType.WAITING.value[0],
                              "progress": vod.get_progress()
                              })
        for i in image_path_array:
            oss = OssUtil(self.__base_dir)
            fileName = get_file_name(i)
            # Images are keyed under the per-run timestamp prefix.
            uploadPath = "%s/%s" % (self.__current_time, fileName)
            image_result = it.submit(self.uploadImage, oss, uploadPath, i)
            imageTask[fileName] = [image_result, oss, i]
            imageList.append({"fileName": fileName, "imageUrl": "", "status": UploadStatusType.WAITING.value[0],
                              "progress": oss.progress
                              })

def start_hd_thread(self, imageList, videoList):
hd = Thread(target=self.hdThrad, args=(imageList, videoList))
hd.setDaemon(True)
hd.start()
return hd

def stop_hd_thread(self, hd):
if hd:
self.hd_status = False
start = time()
hd.join(120)
if time() - start > 120:
logger.error("心跳线程停止超时, requestId:{}", self.__requestId)

    @staticmethod
    def change_status(imageList, num, imageTask, image_backup, videoList, videoTask, video_backup, requestId):
        """Poll all upload futures once, updating each entry's status/progress.

        Successful files are deleted locally; failed files are moved to the
        backup directory. Each completed task's slots are nulled so it is
        only processed once.

        :param num: running counter of entries still non-terminal (status < 15)
        :returns: updated counter; 0 means every entry reached a terminal state
        """
        for image in imageList:
            if image["status"] < 15:
                num += 1
                image_task = imageTask.get(image.get("fileName"))
                # (image_result, oss, i)
                if image_task:
                    if not image_task[0].done():
                        image["status"] = UploadStatusType.RUNNING.value[0]
                        image["progress"] = image_task[1].get_progress()
                        continue
                    if image_task[0].done():
                        try:
                            # result() re-raises the worker's exception, if any.
                            image_task[0].result()
                            image["imageUrl"] = image_task[1].get_image_url()
                            image["status"] = UploadStatusType.SUCCESS.value[0]
                            image["progress"] = "1.0000"
                            delete_file(image_task[2])
                        except Exception:
                            logger.error("文件{}上传失败, 异常: {}, requestId: {}", image_task[2], format_exc(),
                                         requestId)
                            image["status"] = UploadStatusType.FAILED.value[0]
                            image["progress"] = image_task[1].get_progress()
                            create_dir(image_backup)
                            move_file(image_task[2], image_backup)
                        finally:
                            # Null out so this task is not re-processed next poll.
                            image_task[1] = None
                            image_task[0] = None

        for video in videoList:
            if video["status"] < 15:
                num += 1
                video_task = videoTask.get(video.get("fileName"))
                if video_task:
                    # 如果任务已经完成
                    # (video_result, vod, i)
                    if not video_task[0].done():
                        video["status"] = UploadStatusType.RUNNING.value[0]
                        video["progress"] = video_task[1].get_progress()
                        continue
                    if video_task[0].done():
                        try:
                            video_task[0].result()
                            video["videoUrl"] = video_task[1].get_video_url()
                            video["status"] = UploadStatusType.SUCCESS.value[0]
                            video["progress"] = "1.0000"
                            delete_file(video_task[2])
                        except Exception:
                            logger.error("文件{}上传失败, 异常:{}, requestId: {}", video_task[2], format_exc(),
                                         requestId)
                            video["status"] = UploadStatusType.FAILED.value[0]
                            video["progress"] = video_task[1].get_progress()
                            create_dir(video_backup)
                            move_file(video_task[2], video_backup)
                        finally:
                            video_task[1] = None
                            video_task[0] = None
        return num

    @staticmethod
    def wait_thread(it, vt, requestId):
        """Shut down both upload pools (waiting for running tasks) and return (None, None).

        Returning the pair lets the caller clear its references:
        ``it, vt = self.wait_thread(it, vt, requestId)``.
        """
        if it:
            it.shutdown(wait=True)
            # NOTE(review): trailing 'm' in the log text looks like a typo.
            logger.info("it线程池关闭完成m, requestId:{}", requestId)
        if vt:
            vt.shutdown(wait=True)
            logger.info("vt线程池关闭完成m, requestId:{}", requestId)
        return None, None

    def run(self):
        """Process entry point: upload all local images/videos and report progress.

        Flow: start heartbeat -> scan local dirs -> submit uploads to two
        pools -> poll change_status() until all entries are terminal ->
        report SUCCESS. The finally block closes the pools, re-reports
        failures and moves any leftover files to the backup directories.
        """
        imageList, videoList, imageTask, videoTask, image_backup, video_backup = [], [], {}, {}, self.image_backup, \
            self.video_backup
        it, vt, ex, requestId, hd, fb_queue, base_dir = None, None, None, self.__requestId, None, \
            self.__fb_queue, self.__base_dir
        try:
            init_log(base_dir)
            logger.info("启动文件上传进程!!!requestId:{}", requestId)
            # 启动心跳线程
            hd = self.start_hd_thread(imageList, videoList)
            image_path_array, video_path_array = get_all_images(self.imagePath), get_all_videos(self.videoPath)
            if len(image_path_array) == 0 and len(video_path_array) == 0:
                logger.info("未查询到本地视频及图片文件!!!requestId:{}", requestId)
                upload_result(self.__fb_queue, requestId, status=UploadTaskStatusType.SUCCESS.value[0])
                return
            # 10 workers for images, 5 for the (larger) videos.
            it, vt = ThreadPoolExecutorNew(max_workers=10), ThreadPoolExecutorNew(max_workers=5)
            self.upload_method(video_path_array, videoTask, videoList, vt, image_path_array, imageTask, imageList, it)
            self.run_status = UploadTaskStatusType.RUNNING.value[0]
            start_time = time()
            while True:
                if self.hd_status and not hd.is_alive():
                    logger.error("心跳线程异常停止, requestId:{}", requestId)
                    raise Exception("心跳线程异常停止!")
                # 超时检查
                if time() - start_time >= 43200:
                    logger.error("上传文件任务执行超时!requestId: {}", requestId)
                    raise ServiceException(ExceptionType.TASK_TIMEOUT_EXCEPTION.value[0],
                                           ExceptionType.TASK_TIMEOUT_EXCEPTION.value[1])
                num = 0
                num = self.change_status(imageList, num, imageTask, image_backup, videoList, videoTask, video_backup,
                                         requestId)
                if num == 0:
                    # Every entry is terminal: close pools, stop heartbeat, report success.
                    it, vt = self.wait_thread(it, vt, requestId)
                    self.stop_hd_thread(hd)
                    upload_result(fb_queue, requestId, status=UploadTaskStatusType.SUCCESS.value[0],
                                  imageList=imageList, videoList=videoList)
                    break
                sleep(1)
            # NOTE(review): this bare raise executes right after the success
            # `break`, flipping the task into the failure path (a FAILED
            # message is then sent after SUCCESS). Confirm whether it is
            # intentional or leftover debugging code.
            raise Exception
        except ServiceException as s:
            ex = s.code, s.msg
            logger.error("上传文件任务异常失败: {}, requestId:{}", s.msg, requestId)
        except Exception:
            ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
            logger.error("上传文件任务异常失败: {}, requestId:{}", format_exc(), requestId)
        finally:
            try:
                # Final sweep: close pools, mark leftovers failed, stop heartbeat.
                self.wait_thread(it, vt, requestId)
                self.change_status(imageList, 0, imageTask, image_backup, videoList, videoTask, video_backup,
                                   requestId)
                self.stop_hd_thread(hd)
                if ex and fb_queue:
                    code, msg = ex
                    upload_result(fb_queue, requestId, errorCode=code, errorMsg=msg,
                                  status=UploadTaskStatusType.FAILED.value[0], imageList=imageList, videoList=videoList)
                # NOTE(review): Windows-style separator in the path literal;
                # verify behavior on Linux deployments.
                tmp = join(base_dir, r'tmp\oss')
                remove_after_create(tmp)
            finally:
                # Anything still on disk is moved to the backup folders.
                image_path_array, video_path_array = get_all_images(self.imagePath), get_all_videos(self.videoPath)
                if len(image_path_array) > 0 or len(video_path_array) > 0:
                    with ThreadPoolExecutor(max_workers=10) as ec:
                        for i in image_path_array:
                            create_dir(image_backup)
                            logger.error("上传文件失败文件迁移备份, 文件名: {}, requestId: {}", i, requestId)
                            ec.submit(move_file, i, image_backup)
                        for i in video_path_array:
                            create_dir(video_backup)
                            logger.error("上传文件失败文件迁移备份, 文件名: {}, requestId: {}", i, requestId)
                            ec.submit(move_file, i, video_backup)
                logger.info("上传文件任务完成, requestId: {}", requestId)

+ 45
- 0
test/__init__.py View File

@@ -0,0 +1,45 @@
import threading
import time
import inspect
import ctypes

from util.ThreadUtil import ThreadPoolExecutorNew


def _async_raise(tid, exctype):
    """Asynchronously raise *exctype* in the thread identified by *tid*.

    Uses the CPython C-API ``PyThreadState_SetAsyncExc``. The exception is
    delivered the next time the target thread executes Python bytecode, so a
    thread blocked inside C code will not see it until that call returns.

    :param tid: target thread id (``threading.Thread.ident``).
    :param exctype: exception class to inject; an instance is coerced to its
        class because the C-API only accepts exception types.
    :raises ValueError: if *tid* does not name a live thread.
    :raises SystemError: if more than one thread state was modified; the
        injection is reverted with ``exc=NULL`` before raising.
    """
    # The C signature is PyThreadState_SetAsyncExc(unsigned long id, PyObject*).
    # Thread ids are unsigned, so wrap with c_ulong rather than c_long to avoid
    # overflow on platforms where ids can exceed LONG_MAX (e.g. 64-bit Windows).
    tid = ctypes.c_ulong(tid)
    if not inspect.isclass(exctype):
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        # More than one thread state was affected: revert by calling again
        # with exc=NULL, then report the failure.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")

def stop_thread(thread):
    """Force-stop *thread* by asynchronously injecting ``SystemExit`` into it."""
    # Delegates to _async_raise; raises ValueError if the thread is not alive.
    _async_raise(thread.ident, SystemExit)

def print_time():
    """Debug helper: print a marker value once per second, forever."""
    marker = 111111111111
    while True:
        time.sleep(1)
        print(marker)

def print_thread():
    """Start a daemon thread that runs ``print_time`` in the background.

    Returns immediately after starting the worker; the worker loops forever
    but, being a daemon, does not keep the interpreter alive.
    """
    worker = threading.Thread(target=print_time)
    # Daemon so the process can exit even while the print loop is running.
    worker.daemon = True
    worker.start()


if __name__ == "__main__":
    # Manual smoke test: spawn print_thread and poll its liveness. It exits
    # almost immediately after starting its own daemon worker, so is_alive()
    # quickly becomes False while the worker keeps printing.
    starter = threading.Thread(target=print_thread)
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # `daemon` attribute instead.
    starter.daemon = True
    starter.start()
    while True:
        time.sleep(1)
        print(starter.is_alive())

+ 0
- 0
test/image/__init__.py View File


+ 17
- 0
test/image/test.py View File

@@ -0,0 +1,17 @@
import os
# import glob
# def get_all_images(directory):
# images = []
# # 使用glob模块的通配符匹配查找所有图片文件
# image_files = glob.glob(os.path.join(directory, '*.jpg')) + glob.glob(os.path.join(directory, '*.jpeg')) + glob.glob(os.path.join(directory, '*.png')) + glob.glob(os.path.join(directory, '*.gif'))
# for image_file in image_files:
# if os.path.isfile(image_file):
# images.append(image_file)
# return images
# # 指定文件夹路径
# directory = '/path/to/directory'
# # 查询指定文件夹下的所有图片
# result = get_all_images(directory)
# # 打印结果
# for image in result:
# print(image)

+ 0
- 0
test/ossdemo/__init__.py View File


+ 58
- 0
test/ossdemo/examples/async_fetch_task.py View File

@@ -0,0 +1,58 @@
import os
import oss2
import base64
import time
from oss2.compat import to_bytes
from oss2.models import AsyncFetchTaskConfiguration

# This example demonstrates the API for creating a task that asynchronously
# fetches a remote file into a bucket.

# First initialise AccessKeyId, AccessKeySecret, Endpoint and so on.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the Endpoint may be:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Confirm that all parameters above have been filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

object_name = "test-async-object"
url = "<yourSrcObjectUrl>"
callback = '{"callbackUrl":"www.abc.com/callback","callbackBody":"${etag}"}'
base64_callback = oss2.utils.b64encode_as_string(to_bytes(callback))

# Optional parameters include host, callback, content_md5, ignore_same_key, etc.
task_config = AsyncFetchTaskConfiguration(url, object_name, callback=base64_callback, ignore_same_key=False)

# Create the task that asynchronously fetches the file into the bucket.
result = bucket.put_async_fetch_task(task_config)
task_id = result.task_id
print('task_id:', result.task_id)

time.sleep(5)

# Query the specified asynchronous task by id.
result = bucket.get_async_fetch_task(task_id)

# Print the retrieved task information.
print('=====get result======')
print('task_id:', result.task_id)
print('state:', result.task_state)
print('error_msg:', result.error_msg)
task_config = result.task_config
print('task info:')
print('url:', task_config.url)
print('object_name:', task_config.object_name)
print('host:', task_config.host)
print('content_md5:', task_config.content_md5)
print('callback:', task_config.callback)
print('ignoreSameKey:', task_config.ignore_same_key)

+ 66
- 0
test/ossdemo/examples/async_process_object.py View File

@@ -0,0 +1,66 @@
import base64
import os
import time
import oss2

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can
# set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')

key = 'test-video.mp4'
dest_key = 'dest_test-video'
video_path = 'your mp4 video path'

# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Upload the local video file
put_result = bucket.put_object_from_file(key, video_path)
print("put object result status: %s" % put_result.status)

try:
    # Build the process string: convert the video and save the result as
    # <dest_key>.mp4 in this bucket (saveas arguments are url-safe base64
    # with the '=' padding stripped).
    process = "video/convert,f_mp4,vcodec_h265,s_1920x1080,vb_2000000,fps_30,acodec_aac,ab_100000,sn_1|sys/saveas,o_{0},b_{1}".format(
        oss2.compat.to_string(base64.urlsafe_b64encode(oss2.compat.to_bytes(dest_key))).replace('=', ''),
        oss2.compat.to_string(base64.urlsafe_b64encode(oss2.compat.to_bytes(bucket.bucket_name))).replace('=', ''))

    # Call the async_process_object interface
    result = bucket.async_process_object(key, process)
    print("async process object result status: %s" % result.status)
    print(result.request_id)
    print("event_id: %s" % result.event_id)
    print("async_request_id: %s" % result.async_request_id)
    print("task_id: %s" % result.task_id)

    # Sleep for a period of time, waiting for asynchronous video processing to complete
    time.sleep(10)

    # Check whether the processed video exists
    exists = bucket.object_exists(dest_key+".mp4")
    print("is exists: %s" % exists)
except oss2.exceptions.OssError as e:
    # Do not swallow the error silently (the original sample used `pass`):
    # report it so a failed demo run is visible, then fall through to cleanup.
    print("async process object failed: %s" % e)
finally:
    # Delete the uploaded video and the processed file
    del_key = bucket.delete_object(key)
    print("delete key result: %s" % del_key.status)
    del_dest_key = bucket.delete_object(dest_key+".mp4")
    print("delete dest key result: %s" % del_dest_key.status)





+ 134
- 0
test/ossdemo/examples/bucket.py View File

@@ -0,0 +1,134 @@
# -*- coding: utf-8 -*-

import time
import os

import oss2


# This example demonstrates bucket-level operations: creating, deleting and
# listing buckets, lifecycle rules, static-website hosting, ACLs, and so on.


# First initialise AccessKeyId, AccessKeySecret, Endpoint and so on.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Confirm that all parameters above have been filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# List all buckets:
# 1. create a Service object
# 2. iterate with oss2.BucketIterator
service = oss2.Service(oss2.Auth(access_key_id, access_key_secret), endpoint)
print('\n'.join(info.name for info in oss2.BucketIterator(service)))


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Create the bucket with an explicit ACL and storage class.
bucket.create_bucket(permission=oss2.BUCKET_ACL_PRIVATE,
                     input=oss2.models.BucketCreateConfig(oss2.BUCKET_STORAGE_CLASS_STANDARD))

# Fetch basic bucket information.
bucket_info = bucket.get_bucket_info()
print('name: ' + bucket_info.name)
print('storage class: ' + bucket_info.storage_class)
print('creation date: ' + bucket_info.creation_date)

# Inspect bucket statistics.
bucket_stat = bucket.get_bucket_stat()
print('storage: ' + str(bucket_stat.storage_size_in_bytes))
print('object count: ' + str(bucket_stat.object_count))
print('multi part upload count: ' + str(bucket_stat.multi_part_upload_count))

# Lifecycle: objects with the '中文/' prefix expire 357 days after their
# last modification.
rule = oss2.models.LifecycleRule('lc_for_chinese_prefix', '中文/', status=oss2.models.LifecycleRule.ENABLED,
                                 expiration=oss2.models.LifecycleExpiration(days=357))

# Abort multipart uploads 356 days after their last modification.
rule.abort_multipart_upload = oss2.models.AbortMultipartUpload(days=356)
# Transition objects to IA storage 180 days after last modification.
rule.storage_transitions = [oss2.models.StorageTransition(days=180, storage_class=oss2.BUCKET_STORAGE_CLASS_IA)]
# Transition objects to ARCHIVE storage 356 days after last modification.
rule.storage_transitions.append(oss2.models.StorageTransition(days=356, storage_class=oss2.BUCKET_STORAGE_CLASS_ARCHIVE))

lifecycle = oss2.models.BucketLifecycle([rule])
bucket.put_bucket_lifecycle(lifecycle)

# Below we only show how to configure static website hosting. Other bucket
# operations follow the same pattern; see tests/test_bucket.py for more.

# Option 1: build a BucketWebsite object.
bucket.put_bucket_website(oss2.models.BucketWebsite('index.html', 'error.html'))

# Option 2: set the XML directly.
xml = '''
<WebsiteConfiguration>
<IndexDocument>
<Suffix>index2.html</Suffix>
</IndexDocument>

<ErrorDocument>
<Key>error2.html</Key>
</ErrorDocument>
</WebsiteConfiguration>
'''
bucket.put_bucket_website(xml)

# Option 3: read the XML configuration from a local file.
# oss2.to_bytes() converts unicode to bytes.
with open('website_config.xml', 'wb') as f:
    f.write(oss2.to_bytes(xml))

with open('website_config.xml', 'rb') as f:
    bucket.put_bucket_website(f)

os.remove('website_config.xml')


# Read the configuration back.
# OSS is a distributed system; a freshly written configuration may not be
# readable immediately, so wait a few seconds first.
time.sleep(5)

result = bucket.get_bucket_website()
assert result.index_file == 'index2.html'
assert result.error_file == 'error2.html'


# Disable static website hosting.
bucket.delete_bucket_website()

# Obtain the region of the bucket
result = bucket.get_bucket_location()
print('location: ' + result.location)

# Determine whether a bucket exists
def does_bucket_exist(bucket):
    try:
        bucket.get_bucket_info()
    except oss2.exceptions.NoSuchBucket:
        return False
    except:
        raise
    return True

exist = does_bucket_exist(bucket)
# If the returned value is true, a bucket with the specified name exists. If the returned value is false, a bucket with the specified name does not exist.
if exist:
    print('bucket exist')
else:
    print('bucket not exist')


# Configure the ACL of the bucket to private
bucket.put_bucket_acl(oss2.BUCKET_ACL_PRIVATE)

# Obtain the ACL of a bucket. (The original sample repeated the
# put_bucket_acl call here; that duplicate request was redundant, since the
# ACL was already set to private just above, and has been removed.)
print(bucket.get_bucket_acl().acl)

+ 35
- 0
test/ossdemo/examples/bucket_access_monitor.py View File

@@ -0,0 +1,35 @@

import os
import oss2

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Configure access monitor for the bucket.
# If status is set to Enabled, access monitor is enabled. If status is set to Disabled, access monitor is disabled.
status = "Enabled"
bucket.put_bucket_access_monitor(status)

# Query the access monitor status of the bucket.
result = bucket.get_bucket_access_monitor()
status = result.access_monitor.status
print("Return access monitor status: ", status)

+ 55
- 0
test/ossdemo/examples/bucket_callback_policy.py View File

@@ -0,0 +1,55 @@
import base64
import os
import oss2
from oss2.models import CallbackPolicyInfo

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Set callback policy (callback bodies and variables must be base64-encoded).
callback_content = "{\"callbackUrl\":\"www.abc.com/callback\",\"callbackBody\":\"${etag}\"}"
callback_content2 = "{\"callbackUrl\":\"http://www.bbc.com/test\",\"callbackHost\":\"www.bbc.com\",\"callbackBody\":\"{\\\"mimeType\\\":${mimeType},\\\"size\\\":${size}}\"}"
callback_var_content2 = "{\"x:var1\":\"value1\",\"x:var2\":\"value2\"}"
callback = base64.b64encode(callback_content.encode(encoding='utf-8'))
callback2 = base64.b64encode(callback_content2.encode(encoding='utf-8'))
callback_var2 = base64.b64encode(callback_var_content2.encode(encoding='utf-8'))

callback_policy_1 = CallbackPolicyInfo('test_1', callback)
callback_policy_2 = CallbackPolicyInfo('test_2', callback2, callback_var2)
put_result = bucket.put_bucket_callback_policy([callback_policy_1, callback_policy_2])
print("Return put status: ", put_result.status)

# Get callback policy
get_result = bucket.get_bucket_callback_policy()
print("Return get status: ", get_result.status)
print("policy name: ", get_result.callback_policies[0].policy_name)
print("callback: ", get_result.callback_policies[0].callback)
print("policy name: ", get_result.callback_policies[1].policy_name)
print("callback: ", get_result.callback_policies[1].callback)
print("callback var: ", get_result.callback_policies[1].callback_var)

# Upload a file and trigger the 'test_2' callback policy via the x-oss-callback header
bucket.put_object("test-key.txt", "aaa", headers={'x-oss-callback': base64.b64encode("{\"callbackPolicy\":\"test_2\"}".encode(encoding='utf-8'))})

# Delete callback policy
del_result = bucket.delete_bucket_callback_policy()
print("Return delete status: ", del_result.status)

+ 63
- 0
test/ossdemo/examples/bucket_cname.py View File

@@ -0,0 +1,63 @@

import os
import oss2

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')

test_domain = 'www.example.com'
cert_id = '49311111-cn-hangzhou'
previous_cert_id = '493333'
certificate = '''-----BEGIN CERTIFICATE-----
MIIDWzCCAkOgAwIBA***uYSSkW+KTgnwyOGU9cv+mxA=
-----END CERTIFICATE-----'''
private_key = '''-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqh***2t41Q/SC3HUGC5mJjpO8=
-----END PRIVATE KEY-----
'''


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Create the cname token required for domain-name ownership verification
result = bucket.create_bucket_cname_token(test_domain)
print(result.cname)
print(result.token)
print(result.expire_time)

# Get the created cname token.
# Fixed: the original sample called `bucket.bucket.get_bucket_cname_token(...)`,
# which raises AttributeError -- oss2.Bucket has no `bucket` attribute.
get_result = bucket.get_bucket_cname_token(test_domain)
print(get_result.cname)
print(get_result.token)
print(get_result.expire_time)

# Bind a custom domain name to a bucket.
cert = oss2.models.CertInfo(cert_id, certificate, private_key, previous_cert_id, True, False)
input = oss2.models.PutBucketCnameRequest(test_domain, cert)
bucket.put_bucket_cname(input)

# Query the list of all cnames bound to the bucket.
list_result = bucket.list_bucket_cname()
for c in list_result.cname:
    print(c.domain)
    print(c.last_modified)
    print(c.status)

# Delete the bound CNAME of the bucket
bucket.delete_bucket_cname(test_domain)

+ 51
- 0
test/ossdemo/examples/bucket_cors.py View File

@@ -0,0 +1,51 @@

import os
import oss2
from oss2.models import BucketCors, CorsRule

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Configure CORS rules
rule = CorsRule(allowed_origins=['*'],
                allowed_methods=['GET', 'HEAD'],
                allowed_headers=['*'],
                max_age_seconds=1000)

# The existing rules will be replaced.
bucket.put_bucket_cors(BucketCors([rule]))

# Obtain CORS rules
try:
    cors = bucket.get_bucket_cors()
except oss2.exceptions.NoSuchCors:
    print('cors is not set')
else:
    for rule in cors.rules:
        print('AllowedOrigins={0}'.format(rule.allowed_origins))
        print('AllowedMethods={0}'.format(rule.allowed_methods))
        print('AllowedHeaders={0}'.format(rule.allowed_headers))
        print('ExposeHeaders={0}'.format(rule.expose_headers))
        print('MaxAgeSeconds={0}'.format(rule.max_age_seconds))

# Delete CORS rules
bucket.delete_bucket_cors()

+ 129
- 0
test/ossdemo/examples/bucket_inventory.py View File

@@ -0,0 +1,129 @@
# -*- coding: utf-8 -*-

import oss2
import os
from oss2.models import (InventoryConfiguration,
                         InventoryFilter,
                         InventorySchedule,
                         InventoryDestination,
                         InventoryBucketDestination,
                         InventoryServerSideEncryptionKMS,
                         InventoryServerSideEncryptionOSS,
                         INVENTORY_INCLUDED_OBJECT_VERSIONS_CURRENT,
                         INVENTORY_INCLUDED_OBJECT_VERSIONS_ALL,
                         INVENTORY_FREQUENCY_DAILY,
                         INVENTORY_FREQUENCY_WEEKLY,
                         INVENTORY_FORMAT_CSV,
                         FIELD_SIZE,
                         FIELD_LAST_MODIFIED_DATE,
                         FIELD_STORAG_CLASS,
                         FIELD_ETAG,
                         FIELD_IS_MULTIPART_UPLOADED,
                         FIELD_ENCRYPTION_STATUS)

# This example demonstrates the bucket inventory APIs.

# First initialise AccessKeyId, AccessKeySecret, Endpoint and so on.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the Endpoint may be:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

account_id = '<yourtBucketDestinationAccountId>'
role_arn = '<yourBucketDestinationRoleArn>'
dest_bucket_name = '<yourBucketDestinationName>'

# Confirm that all parameters above have been filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint, account_id, role_arn, dest_bucket_name):
    assert '<' not in param, '请设置参数:' + param

# Print one inventory configuration.
def print_inventory_configuration(configuration):
    print('======inventory configuration======')
    print('inventory_id', configuration.inventory_id)
    print('is_enabled', configuration.is_enabled)
    print('frequency', configuration.inventory_schedule.frequency)
    print('included_object_versions', configuration.included_object_versions)
    print('inventory_filter prefix', configuration.inventory_filter.prefix)
    print('fields', configuration.optional_fields)
    bucket_destin = configuration.inventory_destination.bucket_destination
    print('===bucket destination===')
    print('account_id', bucket_destin.account_id)
    print('role_arn', bucket_destin.role_arn)
    print('bucket', bucket_destin.bucket)
    print('format', bucket_destin.inventory_format)
    print('destination prefix', bucket_destin.prefix)
    if bucket_destin.sse_kms_encryption is not None:
        print('server side encryption by kms, key id:', bucket_destin.sse_kms_encryption.key_id)
    elif bucket_destin.sse_oss_encryption is not None:
        print('server side encryption by oss.')

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Inventory configuration id and the object fields the inventory includes.
inventory_id = "test-config-id"
optional_fields = [FIELD_SIZE, FIELD_LAST_MODIFIED_DATE, FIELD_STORAG_CLASS,
                   FIELD_ETAG, FIELD_IS_MULTIPART_UPLOADED, FIELD_ENCRYPTION_STATUS]

# Destination-bucket settings for the inventory results.
bucket_destination = InventoryBucketDestination(
    # account_id of the destination bucket's owner.
    account_id=account_id,
    # role_arn used to write to the destination bucket.
    role_arn=role_arn,
    # Name of the destination bucket.
    bucket=dest_bucket_name,
    # Inventory format.
    inventory_format=INVENTORY_FORMAT_CSV,
    # Storage-path prefix of the inventory results.
    prefix='store-prefix',
    # To encrypt the inventory with KMS, see:
    # sse_kms_encryption=InventoryServerSideEncryptionKMS("test-kms-id")
    # To encrypt the inventory with OSS server-side encryption, see:
    # sse_oss_encryption=InventoryServerSideEncryptionOSS()
)

# Build the inventory configuration.
inventory_configuration = InventoryConfiguration(
    # Configuration id of the inventory.
    inventory_id=inventory_id,
    # Whether the configuration is enabled.
    is_enabled=True,
    # Schedule on which the inventory is generated.
    inventory_schedule=InventorySchedule(frequency=INVENTORY_FREQUENCY_DAILY),
    # Include only current object versions; INVENTORY_INCLUDED_OBJECT_VERSIONS_ALL
    # includes all versions (effective in a versioned bucket).
    included_object_versions=INVENTORY_INCLUDED_OBJECT_VERSIONS_CURRENT,
    # Prefix filter for the objects covered by the inventory.
    inventory_filter=InventoryFilter(prefix="obj-prefix"),
    # Object attributes included in the inventory.
    optional_fields=optional_fields,
    # Destination configuration of the inventory results.
    inventory_destination=InventoryDestination(bucket_destination=bucket_destination))

# Apply the inventory configuration.
bucket.put_bucket_inventory_configuration(inventory_configuration)

# Read the inventory configuration back.
result = bucket.get_bucket_inventory_configuration(inventory_id = inventory_id);
print_inventory_configuration(result)

# List the inventory configurations.
# With more than 100 configurations the listing is paginated; paging info lives in
# class:`ListInventoryConfigurationResult <oss2.models.ListInventoryConfigurationResult>`.
result = bucket.list_bucket_inventory_configurations()
print('========list result=======')
print('is truncated', result.is_truncated)
# NOTE(review): 'continuaiton_token' is the attribute name as spelled by the SDK.
print('continuaiton_token', result.continuaiton_token)
print('next_continuation_token', result.next_continuation_token)
for inventory_config in result.inventory_configurations:
    print_inventory_configuration(inventory_config)
    bucket.delete_bucket_inventory_configuration(inventory_config.id)

# Delete the inventory configuration.
bucket.delete_bucket_inventory_configuration(inventory_id)

+ 46
- 0
test/ossdemo/examples/bucket_logging.py View File

@@ -0,0 +1,46 @@

import os
import oss2
from oss2.models import BucketLogging

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Enable access logging. Store log files in the current bucket and set the log file storage directory to 'logging/'.
logging = bucket.put_bucket_logging(BucketLogging(bucket.bucket_name, 'logging/'))
if logging.status == 200:
    print("Enable access logging")
else:
    print("request_id :", logging.request_id)
    print("resp : ", logging.resp.response)

# View access logging configurations
logging = bucket.get_bucket_logging()
print('TargetBucket={0}, TargetPrefix={1}'.format(logging.target_bucket, logging.target_prefix))

# Disable access logging
logging = bucket.delete_bucket_logging()
if logging.status == 204:
    print("Disable access logging")
else:
    print("request_id :", logging.request_id)
    print("resp : ", logging.resp.response)

+ 42
- 0
test/ossdemo/examples/bucket_meta_query.py View File

@@ -0,0 +1,42 @@

import os
import oss2
from oss2.models import MetaQuery, AggregationsRequest

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# open meta query
bucket.open_bucket_meta_query()

# Gets the meta query information of the specified storage space (bucket)
result = bucket.get_bucket_meta_query_status()
print("Print the status of the meta query: ", result.state)

# Query the files (objects) that meet the specified conditions, and list the file information according to the specified fields and sorting method.
aggregations1 = AggregationsRequest(field='Size', operation='sum')
aggregations2 = AggregationsRequest(field='Size', operation='max')
do_meta_query_request = MetaQuery(max_results=2, query='{"Field": "Size","Value": "1048576","Operation": "lt"}', sort='Size', order='asc', aggregations=[aggregations1, aggregations2])
result = bucket.do_bucket_meta_query(do_meta_query_request)

# Turn off the meta query of the storage space (bucket).
result = bucket.close_bucket_meta_query()

+ 58
- 0
test/ossdemo/examples/bucket_policy.py View File

@@ -0,0 +1,58 @@

import os
import oss2
import json

# This example demonstrates the bucket_policy APIs.
# See the official documentation for the exact policy grammar.

# First initialise AccessKeyId, AccessKeySecret, Endpoint and so on.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the Endpoint may be:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Confirm that all parameters above have been filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Build the policy_text
policy=dict()
policy["Version"] = "1"
policy["Statement"] = []
statement = dict()
statement["Action"] = ["oss:PutObject"]
statement["Effect"] = "Allow"
statement["Resource"] = ["acs:oss:*:*:*/*"]
policy["Statement"].append(statement)
policy_text = json.dumps(policy)

# Put policy_text
print("Put policy text : ", policy_text)
bucket.put_bucket_policy(policy_text)

# Get bucket Policy
result = bucket.get_bucket_policy()
policy_json = json.loads(result.policy)
print("Get policy text: ", policy_json)

# Validate the returned policy against what was written.
assert len(policy["Statement"]) == len(policy_json["Statement"])
assert policy["Version"] == policy_json["Version"]
policy_resource = policy["Statement"][0]["Resource"][0]
policy_json_resource = policy_json["Statement"][0]["Resource"][0]
assert policy_resource == policy_json_resource

# Delete the policy
result = bucket.delete_bucket_policy()
assert int(result.status)//100 == 2

+ 38
- 0
test/ossdemo/examples/bucket_referer.py View File

@@ -0,0 +1,38 @@

import os
import oss2
from oss2.models import BucketReferer

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample
# values in the code, such as <your AccessKeyId>, with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Configure the referer whitelist
# Configure to allow empty Referers (True indicates that an empty Referer is allowed, and False indicates that an empty Referer is not allowed), and configure the referer whitelist.
bucket.put_bucket_referer(BucketReferer(True, ['http://aliyun.com', 'http://*.aliyuncs.com']))

# Obtain a referer whitelist
config = bucket.get_bucket_referer()
print('allow empty referer={0}, referers={1}'.format(config.allow_empty_referer, config.referers))

# Clear a referer whitelist
# You cannot clear a referer whitelist directly. To clear a referer whitelist, you need to create the rule that allows an empty referer field and replace the original rule with the new rule.
bucket.put_bucket_referer(BucketReferer(True, []))

+ 65
- 0
test/ossdemo/examples/bucket_replication.py View File

@@ -0,0 +1,65 @@

import os
import oss2
from oss2.models import ReplicationRule

# Demonstrates cross-region replication (CRR): enabling it, querying its
# configuration and progress, and disabling it again.

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com

access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Specify the replication rule ID, the name of the destination bucket, the region in which the destination bucket is located, and whether to synchronize historical data.
# If you do not set the rule_id parameter or you leave the rule_id parameter empty, OSS generates a unique value for this replication rule.
# If the destination bucket is located in the China (Beijing) region, set target_bucket_location to oss-cn-beijing.
# By default, OSS synchronizes historical data. If you set is_enable_historical_object_replication to false, historical data is not synchronized.
replica_config = ReplicationRule(rule_id='test_replication_1',
                                 target_bucket_name='dstexamplebucket',
                                 target_bucket_location='oss-cn-beijing',
                                 is_enable_historical_object_replication=False
                                 )
# Enable CRR for the source bucket.
bucket.put_bucket_replication(replica_config)

# Query the CRR configurations of a bucket
result = bucket.get_bucket_replication()
# Display the returned information.
for rule in result.rule_list:
    print(rule.rule_id)
    print(rule.target_bucket_name)
    print(rule.target_bucket_location)

# Query the progress of the CRR task that is performed on the bucket.
# Specify the replication rule ID. Example: test_replication_1.
result = bucket.get_bucket_replication_progress('test_replication_1')
print(result.progress.rule_id)
# Check whether CRR is enabled for historical data in the bucket.
print(result.progress.is_enable_historical_object_replication)
# Display the progress of historical data synchronization.
print(result.progress.historical_object_progress)
# Display the progress of real-time data synchronization.
print(result.progress.new_object_progress)

# Query the regions to which data in the source bucket can be synchronized.
result = bucket.get_bucket_replication_location()
for location in result.location_list:
    print(location)

# Disable CRR for this bucket.
# Specify the replication rule ID. Example: test_replication_1.
result = bucket.delete_bucket_replication('test_replication_1')

+ 33
- 0
test/ossdemo/examples/bucket_resource_group.py View File

@@ -0,0 +1,33 @@

import os
import oss2

# Demonstrates querying and setting the resource group a bucket belongs to.

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Call the GetBucketResourceGroup interface to query the resource group ID of the storage space.
result = bucket.get_bucket_resource_group()
print(result.resource_group_id)

# Call the PutBucketResourceGroup interface to configure the resource group of the storage space.
# (Here the bucket is re-assigned to the same group it already belongs to.)
put_result = bucket.put_bucket_resource_group(result.resource_group_id)
print(put_result.status)

+ 44
- 0
test/ossdemo/examples/bucket_style.py View File

@@ -0,0 +1,44 @@

import os
import oss2

# Demonstrates the bucket image-style APIs: add, get, list, and delete a style.

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Call the PutStyle interface to add a new picture style.
# The style content is an OSS image-processing directive string.
result = bucket.put_bucket_style('imagestyle','image/resize,w_200')
print(result.status)

# Call the GetStyle interface to query the image style information specified in a bucket.
get_result = bucket.get_bucket_style('imagestyle')
print(get_result.name)
print(get_result.content)

# Call the ListStyle interface to query all the image styles created in a bucket.
list_result = bucket.list_bucket_style()
print(list_result.styles[0].name)
print(list_result.styles[0].content)


# Call DeleteStyle to delete the specified picture style in a bucket.
del_result = bucket.delete_bucket_style('imagestyle')
print(del_result.status)

+ 40
- 0
test/ossdemo/examples/bucket_symlink.py View File

@@ -0,0 +1,40 @@

import os
import oss2

# Demonstrates uploading an OSS symbolic link and retrieving it by version ID
# (meaningful on a bucket with versioning enabled).

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

objectName = '<yourObjectName>'
symlink = '<yourSymlink>'  # fixed: removed stray trailing semicolon

# Upload a symbolic link.
result = bucket.put_symlink(objectName, symlink)
# View the version ID of the uploaded symbolic link.
print('symlink versionid:', result.versionid)

# Obtain the symbolic link with the specified version ID.
params = dict()
params['versionId'] = '<yourSymlinkVersionId>'
result = bucket.get_symlink(symlink, params=params)
# View the version ID of the returned symbolic link.
print('get symlink versionid:', result.versionid)

+ 50
- 0
test/ossdemo/examples/bucket_tagging.py View File

@@ -0,0 +1,50 @@

import os
import oss2
from oss2.models import Tagging, TaggingRule

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from evironment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com

access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Add a tag to a bucket
# Creates a tagging rule.
rule = TaggingRule()
rule.add('key1', 'value1')
rule.add('key2', 'value2')

# Creates a tag.
tagging = Tagging(rule)
# Adds the tag to the bucket.
result = bucket.put_bucket_tagging(tagging)
# Checks the returned HTTP status code.
print('http status:', result.status)


# Obtain the tags added to a bucket
result = bucket.get_bucket_tagging()
# Views the obtained tagging rule.
tag_rule = result.tag_set.tagging_rule
print('tag rule:', tag_rule)

# Deletes the tags added to the bucket.
result = bucket.delete_bucket_tagging()
# Checks the returned HTTP status code.
print('http status:', result.status)

+ 36
- 0
test/ossdemo/examples/bucket_transfer_acceleration.py View File

@@ -0,0 +1,36 @@

import os
import oss2

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from evironment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Configure transfer acceleration for the bucket.
# If enabled is set to true, transfer acceleration is enabled. If enabled is set to false, transfer acceleration is disabled.
enabled = 'true'
bucket.put_bucket_transfer_acceleration(enabled)

# Query the transfer acceleration status of the bucket.
# If the returned value is true, the transfer acceleration feature is enabled for the bucket. If the returned value is false, the transfer acceleration feature is disabled for the bucket.
result = bucket.get_bucket_transfer_acceleration()
enabled_text = result.enabled
print("Returns whether to enable transfer acceleration: ", enabled_text)

+ 28
- 0
test/ossdemo/examples/bucket_user_qos.py View File

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

import os
import oss2

# This example shows the usage of set_bucket_storage_capacity.

# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure all parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Set the bucket storage capacity to 100 GB.
user_qos = oss2.models.BucketUserQos(100)
resp = bucket.set_bucket_storage_capacity(user_qos)

# Query the capacity information.
result = bucket.get_bucket_storage_capacity()
# Fixed: wrap in str() — concatenating the capacity directly to a str raises
# TypeError when the SDK returns it as an int.
print("bucket_qos_capacity:" + str(result.storage_capacity))

+ 42
- 0
test/ossdemo/examples/bucket_versioning.py View File

@@ -0,0 +1,42 @@

import os
import oss2
from oss2.models import BucketVersioningConfig

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from evironment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Initialize the versioning configurations for the bucket.
config = BucketVersioningConfig()
# Set the versioning state to Enabled or Suspended.
config.status = oss2.BUCKET_VERSIONING_ENABLE

# Configure versioning for the bucket.
result = bucket.put_bucket_versioning(config)
# View the HTTP status code.
print('http response code:', result.status)


# Obtain the versioning state of the bucket.
versioning_info = bucket.get_bucket_versioning()
# View the versioning state of the bucket. If versioning has been enabled, Enabled or Suspended is returned. If versioning has not been enabled, None is returned.
print('bucket versioning status:', versioning_info.status)

+ 126
- 0
test/ossdemo/examples/bucket_website.py View File

@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-

import os
import oss2
from oss2.models import (ConditionInlcudeHeader,
                         Condition,
                         Redirect,
                         RedirectMirrorHeaders,
                         MirrorHeadersSet,
                         RoutingRule,
                         BucketWebsite,
                         REDIRECT_TYPE_MIRROR,
                         REDIRECT_TYPE_EXTERNAL,
                         REDIRECT_TYPE_ALICDN,
                         REDIRECT_TYPE_INTERNAL)

# This example demonstrates static-website hosting configuration.


# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure all parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

index_file = 'index.html'
error_file = 'error.html'
# Website hosting with only an index page and a 404 page configured.
bucket.put_bucket_website(BucketWebsite(index_file, error_file))

# Fetch the website configuration.
result = bucket.get_bucket_website()
print('get_bucket_website without redirect:')
print('result index_file:', result.index_file)
print('result error_file:', result.error_file)

bucket.delete_bucket_website()

# Mirror-based back-to-origin hosting, in primary/standby or multi-site mode.
# Set the matching conditions.
include_header1= ConditionInlcudeHeader('host', 'test.oss-cn-beijing-internal.aliyuncs.com')
include_header2 = ConditionInlcudeHeader('host', 'test.oss-cn-shenzhen-internal.aliyuncs.com')
condition1 = Condition(key_prefix_equals='key1',
                       http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition2 = Condition(key_prefix_equals='key2',
                       http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])

# Set the redirect (mirror) header rules.
mirror_headers_set_1 = MirrorHeadersSet("myheader-key5","myheader-value5")
mirror_headers_set_2 = MirrorHeadersSet("myheader-key6","myheader-value6")
set_list = [mirror_headers_set_1, mirror_headers_set_2]
pass_list = ['myheader-key1', 'myheader-key2']
remove_list = ['myheader-key3', 'myheader-key4']
mirror_header = RedirectMirrorHeaders(pass_all=True, pass_list=pass_list, remove_list=remove_list, set_list=set_list)

# Primary/standby origin mode: use mirror_url_slave and mirror_url_probe.
redirect1 = Redirect(redirect_type=REDIRECT_TYPE_MIRROR, pass_query_string=False, mirror_url='http://www.test.com/',
                     mirror_url_slave='http://www.slave.com/', mirror_url_probe='http://www.test.com/index.html', mirror_pass_query_string=False,
                     mirror_follow_redirect=True, mirror_check_md5=True, mirror_headers=mirror_header)

# Without a standby origin.
redirect2 = Redirect(redirect_type=REDIRECT_TYPE_MIRROR, mirror_url='http://www.test.com/',
                     mirror_pass_query_string=True, mirror_follow_redirect=True, mirror_check_md5=False)

# One or more rules may be configured; this example sets several.
rule1 = RoutingRule(rule_num=1, condition=condition1, redirect=redirect1)
rule2 = RoutingRule(rule_num=2, condition=condition2, redirect=redirect2)
website_set = BucketWebsite(index_file, error_file, [rule1, rule2])
bucket.put_bucket_website(website_set)

# Fetch the website configuration.
website_get = bucket.get_bucket_website()
print('get_bucket_website mirror type:')
print('index_file:', website_get.index_file)   # fixed output label typo ("indext_file")
print('error_file:', website_get.error_file)
print('rule sum:', len(website_get.rules))

bucket.delete_bucket_website()

# Alibaba Cloud CDN redirect, plus external and internal redirect settings.
include_header1= ConditionInlcudeHeader('host', 'test.oss-cn-beijing-internal.aliyuncs.com')
include_header2 = ConditionInlcudeHeader('host', 'test.oss-cn-shenzhen-internal.aliyuncs.com')
condition1 = Condition(key_prefix_equals='key3',
                       http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition2 = Condition(key_prefix_equals='key4',
                       http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition3 = Condition(key_prefix_equals='key5',
                       http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])

# AliCDN
redirect1 = Redirect(redirect_type=REDIRECT_TYPE_ALICDN, pass_query_string=True,
                     replace_key_with='${key}.suffix', proto='http', http_redirect_code=302)

# External
redirect2 = Redirect(redirect_type=REDIRECT_TYPE_EXTERNAL, pass_query_string=False, replace_key_prefix_with='abc',
                     proto='https', host_name='oss.aliyuncs.com', http_redirect_code=302)

# Internal
redirect3 = Redirect(redirect_type=REDIRECT_TYPE_INTERNAL, pass_query_string=False, replace_key_with='${key}.suffix')

# One or more rules may be configured; this example sets several.
rule1 = RoutingRule(rule_num=1, condition=condition1, redirect=redirect1)
rule2 = RoutingRule(rule_num=2, condition=condition2, redirect=redirect2)
rule3 = RoutingRule(rule_num=3, condition=condition3, redirect=redirect3)
website_set = BucketWebsite(index_file, error_file, [rule1, rule2, rule3])
bucket.put_bucket_website(website_set)

# Fetch the website configuration.
website_get = bucket.get_bucket_website()
print('get_bucket_website other type:')
print('index_file:', website_get.index_file)   # fixed output label typo ("indext_file")
print('error_file:', website_get.error_file)
print('rule sum:', len(website_get.rules))
for rule in website_get.rules:
    print('rule_num:{}, redirect_type:{}'.format(rule.rule_num, rule.redirect.redirect_type))

bucket.delete_bucket_website()

+ 50
- 0
test/ossdemo/examples/bucket_worm.py View File

@@ -0,0 +1,50 @@

import os
import oss2

# Demonstrates bucket retention (WORM) policies: create, lock, query, cancel,
# and extend a policy.

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com

access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Create the retention policy and set the retention period to 1 days.
result = bucket.init_bucket_worm(1)
# Query the ID of the retention policy.
print(result.worm_id)

# Lock the retention policy.
bucket.complete_bucket_worm('<yourWormId>')

# Query the retention policy.
result = bucket.get_bucket_worm()

# Query the ID of the retention policy.
print(result.worm_id)
# Query the status of the retention policy. InProgress indicates that the retention policy is not locked. Locked indicates that the retention policy is locked.
print(result.state)
# Query the retention period of the retention policy.
print(result.retention_period_days)
# Query the created time of the retention policy.
print(result.creation_date)

# Cancel the unlocked retention policy.
bucket.abort_bucket_worm()

# Extend the retention period of the locked retention policy.
bucket.extend_bucket_worm('<yourWormId>', 2)

+ 242
- 0
test/ossdemo/examples/custom_crypto.py View File

@@ -0,0 +1,242 @@
# -*- coding: utf-8 -*-

import os

import oss2
from oss2.crypto import BaseCryptoProvider
from oss2.utils import b64encode_as_string, b64decode_from_string, to_bytes
from oss2.headers import *

from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from requests.structures import CaseInsensitiveDict

# 以下代码展示了用户自行提供加密算法进行客户端文件加密上传下载的用法,如下载文件、上传文件等,
# 注意在客户端加密的条件下,oss暂不支持文件分片上传下载操作。
# 本例提供了本地非对称加密密钥的加密器CustomCryptoProvider 和用于数据对称加密的FakeCrypto

# 自定义CryptoProvider

class FakeCrypto:
    """Toy symmetric "cipher" demonstrating a user-supplied algorithm.

    A user-provided symmetric algorithm must follow these rules:
      1. expose an algorithm name via ``ALGORITHM``;
      2. provide static factories returning the data key and the initial
         value (required even if the algorithm itself needs no IV);
      3. provide ``encrypt``/``decrypt`` methods.

    This fake simply passes data through unchanged.
    """

    ALGORITHM = "userdefine"

    @staticmethod
    def get_key():
        # Fixed placeholder; a real cipher would generate a random key.
        return 'fake_key'

    @staticmethod
    def get_iv():
        # Fixed placeholder initial value.
        return 'fake_start'

    def __init__(self, key=None, start=None, count=None):
        # Parameters are accepted only for interface compatibility.
        pass

    def encrypt(self, raw):
        # Identity transform: ciphertext equals plaintext.
        return raw

    def decrypt(self, enc):
        # Identity transform: plaintext equals ciphertext.
        return enc


class FakeAsymmetric:
    """Toy asymmetric "key pair": both directions are identity transforms.

    The key accessors return None because no real key material exists.
    """

    def __init__(self):
        # Fixed: the original declared ``__int__`` (a typo for ``__init__``),
        # so this initializer was never invoked; construction still worked
        # only via object's default __init__.
        pass

    def get_public_key(self):
        return

    def get_private_key(self):
        return

    def encrypt(self, data):
        # Identity transform.
        return data

    def decrypt(self, data):
        # Identity transform.
        return data

class CustomCryptoProvider(BaseCryptoProvider):
    """Wrap the data key with the local FakeAsymmetric implementation.

    The data key is encrypted with the public key and decrypted with the
    private key.

    :param class cipher: symmetric cipher class for the payload, FakeCrypto
    """

    def __init__(self, cipher=FakeCrypto):
        super(CustomCryptoProvider, self).__init__(cipher=cipher)

        # FakeAsymmetric is its own inverse, so one object serves as both
        # the public and the private key.
        self.public_key = FakeAsymmetric()
        self.private_key = self.public_key


    def build_header(self, headers=None, multipart_context=None):
        """Build client-side-encryption request headers for an object upload."""
        if not isinstance(headers, CaseInsensitiveDict):
            headers = CaseInsensitiveDict(headers)

        # Preserve the caller-supplied MD5/length of the *plaintext* under
        # the dedicated "unencrypted" header names.
        if 'content-md5' in headers:
            headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_MD5] = headers['content-md5']
            del headers['content-md5']

        if 'content-length' in headers:
            headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_LENGTH] = headers['content-length']
            del headers['content-length']

        # Ship the wrapped (asymmetrically encrypted, base64-encoded) data
        # key and IV, plus the algorithm identifiers.
        headers[OSS_CLIENT_SIDE_ENCRYPTION_KEY] = b64encode_as_string(self.public_key.encrypt(self.plain_key))
        headers[OSS_CLIENT_SIDE_ENCRYPTION_START] = b64encode_as_string(self.public_key.encrypt(to_bytes(str(self.plain_iv))))
        headers[OSS_CLIENT_SIDE_ENCRYPTION_CEK_ALG] = self.cipher.ALGORITHM
        headers[OSS_CLIENT_SIDE_ENCRYPTION_WRAP_ALG] = 'custom'

        # multipart file build header
        if multipart_context:
            headers[OSS_CLIENT_SIDE_ENCRYPTION_DATA_SIZE] = str(multipart_context.data_size)
            headers[OSS_CLIENT_SIDE_ENCRYPTION_PART_SIZE] = str(multipart_context.part_size)

        # Drop the plaintext key material once it has been wrapped.
        self.plain_key = None
        self.plain_iv = None

        return headers

    def build_header_for_upload_part(self, headers=None):
        """Build headers for one part upload (no key material re-sent here)."""
        if not isinstance(headers, CaseInsensitiveDict):
            headers = CaseInsensitiveDict(headers)

        if 'content-md5' in headers:
            headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_MD5] = headers['content-md5']
            del headers['content-md5']

        if 'content-length' in headers:
            headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_LENGTH] = headers['content-length']
            del headers['content-length']

        self.plain_key = None
        self.plain_iv = None

        return headers

    def get_key(self):
        # Obtain the data key from the cipher and remember the plaintext so
        # build_header can wrap it into the request headers.
        self.plain_key = self.cipher.get_key()
        return self.plain_key

    def get_iv(self):
        # Same pattern as get_key, for the initial value.
        self.plain_iv = self.cipher.get_iv()
        return self.plain_iv

    def decrypt_oss_meta_data(self, headers, key, conv=lambda x:x):
        """Decrypt one encryption-related response header; None if absent or undecodable."""
        # NOTE(review): the bare except deliberately maps any failure
        # (missing header, bad base64, decrypt error) to None.
        try:
            return conv(self.private_key.decrypt(b64decode_from_string(headers[key])))
        except:
            return None

    def decrypt_from_str(self, key, value, conv=lambda x:x):
        """Decrypt a base64-encoded string value; None on any failure."""
        try:
            return conv(self.private_key.decrypt(b64decode_from_string(value)))
        except:
            return None



# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the endpoint can be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for HTTP and HTTPS access respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure all parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

key = 'motto.txt'
content = b'a' * 1024 * 1024
filename = 'download.txt'


# Create a CryptoBucket for client-side encryption (user-side keys);
# in this mode only whole-object upload/download operations are provided.
bucket = oss2.CryptoBucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name, crypto_provider=CustomCryptoProvider())

key1 = 'motto-copy.txt'

# Upload the object.
bucket.put_object(key, content, headers={'content-length': str(1024 * 1024)})

"""
文件下载
"""

# Download the object.
result = bucket.get_object(key)

# Verify the round-tripped content.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content

# Download the original object to a local file.
result = bucket.get_object_to_file(key, filename)

# Verify the round-tripped content.
with open(filename, 'rb') as fileobj:
    assert fileobj.read() == content

os.remove(filename)

"""
分片上传
"""
# Prepare the parts for a multipart upload.
part_a = b'a' * 1024 * 100
part_b = b'b' * 1024 * 100
part_c = b'c' * 1024 * 100
multi_content = [part_a, part_b, part_c]

parts = []
data_size = 100 * 1024 * 3
part_size = 100 * 1024
multi_key = "test_crypto_multipart"

res = bucket.init_multipart_upload(multi_key, data_size, part_size)
upload_id = res.upload_id
crypto_multipart_context = res.crypto_multipart_context

# Upload each part.
for i in range(3):
    result = bucket.upload_part(multi_key, upload_id, i+1, multi_content[i], crypto_multipart_context)
    parts.append(oss2.models.PartInfo(i+1, result.etag, size = part_size, part_crc = result.crc))

## If crypto_multipart_context is lost mid-upload, it can be recovered via list_parts:
#for i in range(2):
#    result = bucket.upload_part(multi_key, upload_id, i+1, multi_content[i], crypto_multipart_context)
#    parts.append(oss2.models.PartInfo(i+1, result.etag, size = part_size, part_crc = result.crc))
#
#res = bucket.list_parts(multi_key, upload_id)
#crypto_multipart_context_new = res.crypto_multipart_context
#
#result = bucket.upload_part(multi_key, upload_id, 3, multi_content[2], crypto_multipart_context_new)
#parts.append(oss2.models.PartInfo(3, result.etag, size = part_size, part_crc = result.crc))

# Complete the multipart upload.
result = bucket.complete_multipart_upload(multi_key, upload_id, parts)

# Download the whole object.
result = bucket.get_object(multi_key)

# Verify each part of the round-tripped content.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got[0:102400] == part_a
assert content_got[102400:204800] == part_b
assert content_got[204800:307200] == part_c

+ 96
- 0
test/ossdemo/examples/download.py View File

@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-

import os

import oss2


# This example demonstrates object download: plain download, range download,
# and resumable download.


# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Read them from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the endpoint can be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for HTTP and HTTPS access respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure all parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

key = 'motto.txt'
content = oss2.to_bytes('a' * 1024 * 1024)
filename = 'download.txt'

# Upload a 1 MiB object to download below.
bucket.put_object(key, content, headers={'content-length': str(1024 * 1024)})

"""
文件下载
"""

# Download the object (result is an iterable stream).
result = bucket.get_object(key)

# Verify the content.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content

# Download to a local file.
result = bucket.get_object_to_file(key, filename)

# Verify the content.
with open(filename, 'rb') as fileobj:
    assert fileobj.read() == content

"""
范围下载
"""

# Range download; if the specified range is invalid, the whole object is downloaded.
result = bucket.get_object(key, byte_range=(0, 1023))

# Verify the content.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == oss2.to_bytes('a'*1024)


# Range download to a local file.
result = bucket.get_object_to_file(key, filename, byte_range=(1024, 2047))

# Verify the content.
with open(filename, 'rb') as fileobj:
    assert fileobj.read() == oss2.to_bytes('a'*1024)


"""
断点续传下载
"""

# Resumable download: multipart below multiget_threshold, with 3 threads.
oss2.resumable_download(bucket, key, filename,
                        multiget_threshold=200*1024,
                        part_size=100*1024,
                        num_threads=3)

# Verify the content.
with open(filename, 'rb') as fileobj:
    assert fileobj.read() == content

# Clean up the local file.
os.remove(filename)

+ 18
- 0
test/ossdemo/examples/environment_variable_credentials_provider.py View File

@@ -0,0 +1,18 @@
import oss2
from oss2.credentials import EnvironmentVariableCredentialsProvider

# Demonstrates authenticating with credentials read from environment variables.

# Specify access information, such as Endpoint, BucketName.
# You can obtain access information from environment variables, such as <OSS_ACCESS_KEY_ID>, <OSS_ACCESS_KEY_SECRET> and <OSS_SESSION_TOKEN>.
# Please set the above environment variables on the server before execution
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com

credentials_provider = EnvironmentVariableCredentialsProvider()
auth = oss2.ProviderAuth(credentials_provider)
bucket = oss2.Bucket(auth, '<yourEndpoint>', '<yourBucketName>')

result = bucket.put_object("sample.txt", "hello world")

print("Returns status code: ", result.status)

BIN
test/ossdemo/examples/example.jpg View File

Before After
Width: 400  |  Height: 267  |  Size: 21KB

+ 108
- 0
test/ossdemo/examples/image.py View File

@@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-

import json
import os

from PIL import Image

import oss2


# Demonstrates basic usage of the OSS image-processing service.
# See https://help.aliyun.com/document_detail/32206.html for details.

# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables,
# or replace the '<...>' placeholders below with real values.
#
# Using the Hangzhou region as an example, Endpoint can be:
#   http://oss-cn-hangzhou.aliyuncs.com   (HTTP)
#   https://oss-cn-hangzhou.aliyuncs.com  (HTTPS)
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure every parameter above has been filled in (no '<...>' left).
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

def get_image_info(image_file):
    """Read basic metadata of a local image.

    :param str image_file: path to the local image file
    :return tuple: a 3-tuple (height, width, format), as reported by PIL.
    """
    img = Image.open(image_file)
    height, width, fmt = img.height, img.width, img.format
    return height, width, fmt

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

key = 'example.jpg'
new_pic = 'new-example.jpg'

# Upload the bundled sample image.
bucket.put_object_from_file(key, 'example.jpg')

# Query image info through the image service (process='image/info').
result = bucket.get_object(key, process='image/info')

json_content = result.read()
decoded_json = json.loads(oss2.to_unicode(json_content))
# The expected values match the bundled example.jpg (400x267, 21839 bytes, JPEG).
assert int(decoded_json['ImageHeight']['value']) == 267
assert int(decoded_json['ImageWidth']['value']) == 400
assert int(decoded_json['FileSize']['value']) == 21839
assert decoded_json['Format']['value'] == 'jpg'

# Resize to a fixed 100x100.
process = "image/resize,m_fixed,w_100,h_100"
bucket.get_object_to_file(key, new_pic, process=process)
info = get_image_info(new_pic)
assert info[0] == 100
assert info[1] == 100
assert info[2] == 'JPEG'

# Crop a 100x100 region at (100, 100).
process = "image/crop,w_100,h_100,x_100,y_100,r_1"
bucket.get_object_to_file(key, new_pic, process=process)
info = get_image_info(new_pic)
assert info[0] == 100
assert info[1] == 100
assert info[2] == 'JPEG'

# Rotate 90 degrees (width/height swap: 400x267 -> 267x400 ... note the
# assertion order below is (height, width) = (400, 267) after rotation).
process = "image/rotate,90"
bucket.get_object_to_file(key, new_pic, process=process)
info = get_image_info(new_pic)
assert info[0] == 400
assert info[1] == 267
assert info[2] == 'JPEG'

# Sharpen (dimensions unchanged).
process = "image/sharpen,100"
bucket.get_object_to_file(key, new_pic, process=process)
info = get_image_info(new_pic)
assert info[0] == 267
assert info[1] == 400
assert info[2] == 'JPEG'

# Add a text watermark (text is Base64 of "Hello 图片服务!").
process = "image/watermark,text_SGVsbG8g5Zu-54mH5pyN5YqhIQ"
bucket.get_object_to_file(key, new_pic, process=process)
info = get_image_info(new_pic)
assert info[0] == 267
assert info[1] == 400
assert info[2] == 'JPEG'

# Convert format to PNG.
process = "image/format,png"
bucket.get_object_to_file(key, new_pic, process=process)
info = get_image_info(new_pic)
assert info[0] == 267
assert info[1] == 400
assert info[2] == 'PNG'

# Delete the sample object on OSS.
bucket.delete_object(key)
# Remove the local output file.
os.remove(new_pic)

+ 111
- 0
test/ossdemo/examples/live_channel.py View File

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-

import os
import time

import oss2


# Demonstrates the live-channel (RTMP streaming) related APIs.


# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables,
# or replace the '<...>' placeholders below with real values.
#
# Example endpoints (Shenzhen region):
#   http://oss-cn-shenzhen.aliyuncs.com
#   https://oss-cn-shenzhen.aliyuncs.com
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<您的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<您的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<您的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<您的访问域名>')


# Make sure every parameter above has been filled in (no '<...>' left).
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all live-channel APIs go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)


# Create a live channel named test_rtmp_live. Streaming produces test.m3u8,
# an index of 3 ts fragments of about 5 seconds each (a hint only; the
# actual duration depends on key frames).
channel_name = 'test_rtmp_live'
playlist_name = 'test.m3u8'
create_result = bucket.create_live_channel(
        channel_name,
        oss2.models.LiveChannelInfo(
            status = 'enabled',
            description = '测试使用的直播频道',
            target = oss2.models.LiveChannelInfoTarget(
                playlist_name = playlist_name,
                frag_count = 3,
                frag_duration = 5)))

# publish_url is the RTMP ingest URL (it must be signed unless the bucket
# is public-read-write, see below); play_url points at the generated m3u8.
publish_url = create_result.publish_url
play_url = create_result.play_url

# get_live_channel returns the channel's metadata.
get_result = bucket.get_live_channel(channel_name)
print(get_result.description)
print(get_result.status)
print(get_result.target.type)
print(get_result.target.frag_count)
print(get_result.target.frag_duration)
print(get_result.target.playlist_name)

# Sign the RTMP push URL. `expires` is relative: seconds from now until the
# signature expires. Custom params (a dict) would all be signed as well.
# OSS checks expiry only when the connection is established - an ongoing
# stream is not cut off when the signature expires.
expires = 3600
signed_url = bucket.sign_rtmp_url(channel_name, playlist_name, expires)

# put_live_channel_status toggles the channel between 'enabled' and
# 'disabled' (disabling drops any active stream and rejects new ones).
bucket.put_live_channel_status(channel_name, 'enabled')
bucket.put_live_channel_status(channel_name, 'disabled')

# List existing channels with LiveChannelIterator.
# `prefix` filters channels by name prefix; `max_keys` is the page size the
# iterator uses internally (max 1000, default 100).

prefix = ''
max_keys = 1000

for info in oss2.LiveChannelIterator(bucket, prefix, max_keys=max_keys):
    print(info.name)

# get_live_channel_stat reports the stream status. All fields are
# meaningful only while a stream is being pushed; otherwise status is
# 'Idle' or 'Disabled' and the other fields carry no information.
stat_result = bucket.get_live_channel_stat(channel_name)
print(stat_result.status)
print(stat_result.remote_addr)
print(stat_result.connected_time)
print(stat_result.video)
print(stat_result.audio)

# get_live_channel_history returns up to the last 10 streaming sessions.
history_result = bucket.get_live_channel_history(channel_name)
print(len(history_result.records))

# post_vod_playlist builds a VOD playlist from the ts fragments produced by
# streaming. Note: as written, end_time is 60s in the past and start_time
# is one hour before that, i.e. the window is [now-3660s, now-60s].
# The playlist is written to OSS under `playlist_name`.

end_time = int(time.time()) - 60
start_time = end_time - 3600
bucket.post_vod_playlist(channel_name,
                         playlist_name,
                         start_time = start_time,
                         end_time = end_time)

# get_vod_playlist retrieves the playlist for a given time range.
result = bucket.get_vod_playlist(channel_name, start_time=start_time, end_time=end_time)
print("playlist:", result.playlist)

# Delete the channel once it is no longer needed.
bucket.delete_live_channel(channel_name)

+ 94
- 0
test/ossdemo/examples/object_basic.py View File

@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-

import os
import shutil

import oss2


# Demonstrates basic object upload, download, listing and deletion.


# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables,
# or replace the '<...>' placeholders below with real values.
#
# Example endpoints (Hangzhou region):
#   http://oss-cn-hangzhou.aliyuncs.com   (HTTP)
#   https://oss-cn-hangzhou.aliyuncs.com  (HTTPS)
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure every parameter above has been filled in (no '<...>' left).
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)


# Upload a string; the object name is motto.txt, the content a famous quote.
bucket.put_object('motto.txt', 'Never give up. - Jack Ma')

# Fetch the object's metadata.
# BUG FIX: the original queried the unreplaced placeholder '你的对象名',
# which does not exist in the bucket; query the object just uploaded.
object_meta = bucket.get_object_meta('motto.txt')
# BUG FIX: use str.format() instead of '+' concatenation - concatenating a
# non-str attribute (e.g. an int content length) to a str raises TypeError.
print('last modified: {0}'.format(object_meta.last_modified))
print('etag: {0}'.format(object_meta.etag))
print('size: {0}'.format(object_meta.content_length))

# Download to a local file.
bucket.get_object_to_file('motto.txt', '本地文件名.txt')


# Download the object just uploaded into the local file '本地座右铭.txt'.
# get_object() returns a file-like object, so shutil.copyfileobj() works.
with open(oss2.to_unicode('本地座右铭.txt'), 'wb') as f:
    shutil.copyfileobj(bucket.get_object('motto.txt'), f)


# Upload the local file '本地座右铭.txt' as object '云上座右铭.txt'.
# Note: this put_object() call takes a file object, the earlier one a
# string - put_object() accepts either.
with open(oss2.to_unicode('本地座右铭.txt'), 'rb') as f:
    bucket.put_object('云上座右铭.txt', f)


# The two lines above can also be written as this single call:
bucket.put_object_from_file('云上座右铭.txt', '本地座右铭.txt')


# List up to 10 objects in the bucket, printing last-modified time and key.
for i, object_info in enumerate(oss2.ObjectIterator(bucket)):
    print("{0} {1}".format(object_info.last_modified, object_info.key))

    if i >= 9:
        break


# Delete the object named motto.txt.
bucket.delete_object('motto.txt')

# Batch deletion also works.
# Note: deleting motto.txt again does NOT raise an error.
bucket.batch_delete_objects(['motto.txt', '云上座右铭.txt'])


# Confirm the object is gone.
assert not bucket.object_exists('motto.txt')


# Getting a non-existent object raises oss2.exceptions.NoSuchKey.
try:
    bucket.get_object('云上座右铭.txt')
except oss2.exceptions.NoSuchKey as e:
    print(u'已经被删除了:request_id={0}'.format(e.request_id))
else:
    assert False

# Remove the local files created by this demo.
os.remove(u'本地文件名.txt')
os.remove(u'本地座右铭.txt')

+ 89
- 0
test/ossdemo/examples/object_callback.py View File

@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-

import json
import base64
import os

import oss2


# Demonstrates upload callbacks.

# put_object/complete_multipart_upload support callbacks; resumable_upload
# does not. Sample callback-server code:
# http://shinenuaa.oss-cn-hangzhou.aliyuncs.com/images/callback_app_server.py.zip
# For debugging you can use the public demo callback server
# http://oss-demo.aliyuncs.com:23450 and switch to your own server later.

# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables,
# or replace the '<...>' placeholders below with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure every parameter above has been filled in (no '<...>' left).
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

key = 'quote.txt'
content = "Anything you're good at contributes to happiness."

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

"""
put_object上传回调
"""

# Build the callback parameters; see
# https://help.aliyun.com/document_detail/31989.html for the full list.
callback_dict = {}
callback_dict['callbackUrl'] = 'http://oss-demo.aliyuncs.com:23450'
callback_dict['callbackHost'] = 'oss-cn-hangzhou.aliyuncs.com'
callback_dict['callbackBody'] = 'filename=${object}&size=${size}&mimeType=${mimeType}'
callback_dict['callbackBodyType'] = 'application/x-www-form-urlencoded'
# The callback parameters are JSON, Base64-encoded ...
callback_param = json.dumps(callback_dict).strip()
base64_callback_body = oss2.utils.b64encode_as_string(callback_param)
# ... and passed to OSS in the x-oss-callback request header.
headers = {'x-oss-callback': base64_callback_body}

# Upload with callback.
result = bucket.put_object(key, content, headers)

# Status 200: upload and callback both succeeded; 203: upload succeeded
# but the callback failed.
assert result.status == 200
# result.resp carries the callback server's response body.
assert result.resp.read() == b'{"Status":"OK"}'

# Confirm the upload (CRC matches the known content).
result = bucket.head_object(key)
assert result.headers['x-oss-hash-crc64ecma'] == '108247482078852440'

# Delete the uploaded object.
bucket.delete_object(key)

"""
分片上传回调
"""

# Multipart upload with callback.
# Initiate the upload.
parts = []
upload_id = bucket.init_multipart_upload(key).upload_id
# Upload one part.
result = bucket.upload_part(key, upload_id, 1, content)
parts.append(oss2.models.PartInfo(1, result.etag, size = len(content), part_crc = result.crc))
# Complete the upload, passing the same callback headers.
result = bucket.complete_multipart_upload(key, upload_id, parts, headers)

# Status 200: upload and callback both succeeded; 203: upload succeeded
# but the callback failed.
assert result.status == 200
# result.resp carries the callback server's response body.
assert result.resp.read() == b'{"Status":"OK"}'

# Confirm the upload (CRC matches the known content).
result = bucket.head_object(key)
assert result.headers['x-oss-hash-crc64ecma'] == '108247482078852440'

# Delete the uploaded object.
bucket.delete_object(key)

+ 199
- 0
test/ossdemo/examples/object_check.py View File

@@ -0,0 +1,199 @@
# -*- coding: utf-8 -*-

import base64
import hashlib
import os
import tempfile

import oss2

# Demonstrates data-integrity checks on upload/download.

# OSS supports two checks: MD5 and CRC64. For MD5 the client computes the
# MD5 of the payload and sends it in the `Content-MD5` header. CRC64 is
# verified automatically on upload; on download the client must verify it.
# The OSS Python SDK enables CRC64 checking by default.

# Note: resumable upload does not support MD5, and resumable download does
# not support CRC64; all other uploads/downloads support both.

# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables,
# or replace the '<...>' placeholders below with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure every parameter above has been filled in (no '<...>' left).
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


def calculate_file_md5(file_name, block_size=64 * 1024):
    """Compute the Base64-encoded MD5 digest of a file.

    :param file_name: path of the file to hash
    :param block_size: chunk size used while reading, default 64KB
    :return: Base64-encoded MD5 of the file content (bytes)
    """
    digest = hashlib.md5()
    with open(file_name, 'rb') as f:
        # iter() with a sentinel keeps reading fixed-size chunks until EOF.
        for chunk in iter(lambda: f.read(block_size), b''):
            digest.update(chunk)
    return base64.b64encode(digest.digest())

def calculate_data_md5(data):
    """Compute the Base64-encoded MD5 digest of a piece of data.

    :param data: the data to hash, as bytes; a str is accepted too and is
        encoded as UTF-8 first.
    :return: Base64-encoded MD5 of the data (bytes)
    """
    if isinstance(data, str):
        # BUG FIX: hashlib.update() requires bytes on Python 3, but the
        # demo script below passes a str ('a' * 1024 * 1024), which would
        # raise TypeError. Encoding here keeps the call sites working.
        data = data.encode('utf-8')
    md5 = hashlib.md5()
    md5.update(data)
    return base64.b64encode(md5.digest())


def calculate_file_crc64(file_name, block_size=64 * 1024, init_crc=0):
    """Compute the CRC64 of a file.

    (The original docstring said "MD5" by copy-paste mistake; this function
    computes CRC64 via oss2.utils.Crc64.)

    :param file_name: path of the file to checksum
    :param block_size: chunk size used while reading, default 64KB
    :param init_crc: initial CRC value, default 0
    :return: CRC64 of the file content (int)
    """
    with open(file_name, 'rb') as f:
        crc64 = oss2.utils.Crc64(init_crc)
        while True:
            data = f.read(block_size)
            if not data:
                break
            crc64.update(data)
    return crc64.crc

def _prepare_temp_file(content):
"""创建临时文件
:param content: 文件内容
:return 文件名
"""
fd, pathname = tempfile.mkstemp(suffix='exam-progress-')
os.write(fd, content)
os.close(fd)
return pathname

key = 'story.txt'
# NOTE(review): this is a str, not bytes; the MD5 helpers above receive it
# directly - verify they accept str on Python 3.
content = 'a' * 1024 * 1024

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

"""
MD5校验
"""

# Upload data with a precomputed Content-MD5 header.
encode_md5 = calculate_data_md5(content)
bucket.put_object(key, content, headers={'Content-MD5': encode_md5})


# Upload a file with a precomputed Content-MD5 header.
file_name = _prepare_temp_file(content)
encode_md5 = calculate_file_md5(file_name)
bucket.put_object_from_file(key, file_name, headers={'Content-MD5': encode_md5})

# Delete the uploaded object.
bucket.delete_object(key)


# Append upload with MD5.
# First append starts at position 0.
encode_md5 = calculate_data_md5(content)
result = bucket.append_object(key, 0, content, headers={'Content-MD5': encode_md5})
# Second append starts at the position returned by the first call.
bucket.append_object(key, result.next_position, content, headers={'Content-MD5': encode_md5})

# Delete the uploaded object.
bucket.delete_object(key)


# Multipart upload; each part carries its own Content-MD5.
parts = []
upload_id = bucket.init_multipart_upload(key).upload_id
encode_md5 = calculate_data_md5(content)
for i in range(3):
    result = bucket.upload_part(key, upload_id, i+1, content, headers={'Content-MD5': encode_md5})
    parts.append(oss2.models.PartInfo(i+1, result.etag, size = len(content), part_crc = result.crc))

# Complete the multipart upload.
result = bucket.complete_multipart_upload(key, upload_id, parts)


"""
CRC64校验
"""
# CRC checking is on by default; disable it with
# oss2.Bucket(auth, endpoint, bucket_name, enable_crc=False).

# Plain upload - CRC is verified automatically.
bucket.put_object(key, content)

# Delete the uploaded object.
bucket.delete_object(key)


# Append upload - CRC is only verified when init_crc is passed.
result = bucket.append_object(key, 0, content, init_crc=0)
# Second append: position and init_crc come from the previous result.
bucket.append_object(key, result.next_position, content, init_crc=result.crc)

# Delete the uploaded object.
bucket.delete_object(key)


# Multipart upload - each part is CRC-checked automatically.
parts = []
upload_id = bucket.init_multipart_upload(key).upload_id
for i in range(3):
    result = bucket.upload_part(key, upload_id, i+1, content)
    parts.append(oss2.models.PartInfo(i+1, result.etag, size = len(content), part_crc = result.crc))

# Complete the multipart upload.
result = bucket.complete_multipart_upload(key, upload_id, parts)


# Resumable upload - CRC is verified automatically.
pathname = _prepare_temp_file(content)
oss2.resumable_upload(bucket, key, pathname,
                      multipart_threshold=200*1024,
                      part_size=100*1024,
                      num_threads=3)


# Download and compare client CRC against the server CRC.
result = bucket.get_object(key)
content_got = b''
for chunk in result:
    content_got += chunk
assert result.client_crc == result.server_crc


# Download to a local file - CRC checking is on by default.
local_file = 'download.txt'
result = bucket.get_object_to_file(key, local_file)
os.remove(local_file)
assert result.client_crc == result.server_crc


# Resumable download - can also be verified manually as below.
oss2.resumable_download(bucket, key, local_file,
                        multiget_threshold=200*1024,
                        part_size=100*1024,
                        num_threads=3)

crc64 = calculate_file_crc64(local_file)
os.remove(local_file)

result = bucket.head_object(key)

# The locally computed CRC64 must match the server-reported one.
assert str(crc64) == result.headers['x-oss-hash-crc64ecma']

+ 211
- 0
test/ossdemo/examples/object_crypto.py View File

@@ -0,0 +1,211 @@
# -*- coding: utf-8 -*-

import os
import sys
from Crypto.PublicKey import RSA
from Crypto.PublicKey.RSA import RsaKey

# NOTE(review): hardcoded developer path left in the example; it should be
# removed or made configurable before shipping.
sys.path.append("/Users/fengyu/aliyun-oss-python-sdk")

import oss2
from oss2 import LocalRsaProvider, AliKMSProvider, RsaProvider
from oss2 import models

# Demonstrates client-side encrypted upload and download.


# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables.
#
# Example endpoints (Hangzhou region):
#   http://oss-cn-hangzhou.aliyuncs.com   (HTTP)
#   https://oss-cn-hangzhou.aliyuncs.com  (HTTPS)
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '')
bucket_name = os.getenv('OSS_TEST_BUCKET', '')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '')
cmk = os.getenv('OSS_TEST_CMK', '')
region = os.getenv('OSS_TEST_REGION', '')

# NOTE(review): defaults here are '' (not '<...>'), so this assert never
# fires even when the variables are unset - the checks below are no-ops.
for param in (access_key_id, access_key_secret, bucket_name, endpoint, cmk, region):
    assert '<' not in param, '请设置参数:' + param

key = 'motto.txt'
content = b'a' * 1024 * 1024
filename = 'download.txt'

# Demo RSA key pair used by RsaProvider (do not reuse in production).
private_key = '''-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQCokfiAVXXf5ImFzKDw+XO/UByW6mse2QsIgz3ZwBtMNu59fR5z
ttSx+8fB7vR4CN3bTztrP9A6bjoN0FFnhlQ3vNJC5MFO1PByrE/MNd5AAfSVba93
I6sx8NSk5MzUCA4NJzAUqYOEWGtGBcom6kEF6MmR1EKib1Id8hpooY5xaQIDAQAB
AoGAOPUZgkNeEMinrw31U3b2JS5sepG6oDG2CKpPu8OtdZMaAkzEfVTJiVoJpP2Y
nPZiADhFW3e0ZAnak9BPsSsySRaSNmR465cG9tbqpXFKh9Rp/sCPo4Jq2n65yood
JBrnGr6/xhYvNa14sQ6xjjfSgRNBSXD1XXNF4kALwgZyCAECQQDV7t4bTx9FbEs5
36nAxPsPM6aACXaOkv6d9LXI7A0J8Zf42FeBV6RK0q7QG5iNNd1WJHSXIITUizVF
6aX5NnvFAkEAybeXNOwUvYtkgxF4s28s6gn11c5HZw4/a8vZm2tXXK/QfTQrJVXp
VwxmSr0FAajWAlcYN/fGkX1pWA041CKFVQJAG08ozzekeEpAuByTIOaEXgZr5MBQ
gBbHpgZNBl8Lsw9CJSQI15wGfv6yDiLXsH8FyC9TKs+d5Tv4Cvquk0efOQJAd9OC
lCKFs48hdyaiz9yEDsc57PdrvRFepVdj/gpGzD14mVerJbOiOF6aSV19ot27u4on
Td/3aifYs0CveHzFPQJAWb4LCDwqLctfzziG7/S7Z74gyq5qZF4FUElOAZkz718E
yZvADwuz/4aK0od0lX9c4Jp7Mo5vQ4TvdoBnPuGoyw==
-----END RSA PRIVATE KEY-----'''

public_key = '''-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAKiR+IBVdd/kiYXMoPD5c79QHJbqax7ZCwiDPdnAG0w27n19HnO21LH7
x8Hu9HgI3dtPO2s/0DpuOg3QUWeGVDe80kLkwU7U8HKsT8w13kAB9JVtr3cjqzHw
1KTkzNQIDg0nMBSpg4RYa0YFyibqQQXoyZHUQqJvUh3yGmihjnFpAgMBAAE=
-----END RSA PUBLIC KEY-----'''


# CryptoBucket with a local RSA key pair as the crypto provider.
key_pair = {'private_key': private_key, 'public_key': public_key}
bucket = oss2.CryptoBucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name,
                           crypto_provider=RsaProvider(key_pair))

# Upload (encrypted client-side).
bucket.put_object(key, content, headers={'content-length': str(1024 * 1024)})

"""
文件下载
"""

# Download and decrypt the whole object.
result = bucket.get_object(key)

# Verify the round trip.
content_got = b''
for chunk in result:
    content_got += chunk

assert content_got == content


# Download the object to a local file.
result = bucket.get_object_to_file(key, filename)

# Verify the round trip.
with open(filename, 'rb') as fileobj:
    assert fileobj.read() == content

os.remove(filename)

# Range download. Note byte_range=(0, 1024) is inclusive, i.e. 1025 bytes.
result = bucket.get_object(key, byte_range=(0, 1024))

# Verify: 1025 bytes expected for the inclusive range above.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content[0:1025]

# Multipart upload with client-side encryption.
part_a = b'a' * 1024 * 100
part_b = b'b' * 1024 * 100
part_c = b'c' * 1024 * 100
multi_content = [part_a, part_b, part_c]

parts = []
data_size = 100 * 1024 * 3
part_size = 100 * 1024
multi_key = "test_crypto_multipart"

# The crypto context carries total size and part size across part uploads.
context = models.MultipartUploadCryptoContext(data_size, part_size)
res = bucket.init_multipart_upload(multi_key, upload_context=context)
upload_id = res.upload_id

# Upload each part under the same crypto context.
for i in range(3):
    result = bucket.upload_part(multi_key, upload_id, i+1, multi_content[i], upload_context=context)
    parts.append(oss2.models.PartInfo(i+1, result.etag, size=part_size, part_crc=result.crc))

# Complete the multipart upload.
result = bucket.complete_multipart_upload(multi_key, upload_id, parts)

# Download the whole object back.
result = bucket.get_object(multi_key)

# Verify all three parts round-tripped.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got[0:102400] == part_a
assert content_got[102400:204800] == part_b
assert content_got[204800:307200] == part_c

# CryptoBucket backed by Alibaba Cloud KMS instead of a local key pair.
bucket = oss2.CryptoBucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name,
                           crypto_provider=AliKMSProvider(access_key_id, access_key_secret, region, cmk))

# Upload (encrypted via KMS).
bucket.put_object(key, content, headers={'content-length': str(1024 * 1024)})

"""
文件下载
"""

# Download and decrypt the whole object.
result = bucket.get_object(key)

# Verify the round trip.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content

# Download the object to a local file.
result = bucket.get_object_to_file(key, filename)

# Verify the round trip.
with open(filename, 'rb') as fileobj:
    assert fileobj.read() == content

os.remove(filename)

# Range download (inclusive range, 1025 bytes - see note above).
result = bucket.get_object(key, byte_range=(0, 1024))

# Verify the partial content.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content[0:1025]

"""
分片上传
"""
# Multipart upload via the KMS-backed bucket.
part_a = b'a' * 1024 * 100
part_b = b'b' * 1024 * 100
part_c = b'c' * 1024 * 100
multi_content = [part_a, part_b, part_c]

parts = []
data_size = 100 * 1024 * 3
part_size = 100 * 1024
multi_key = "test_crypto_multipart"

context = models.MultipartUploadCryptoContext(data_size, part_size)
res = bucket.init_multipart_upload(multi_key, upload_context=context)
upload_id = res.upload_id

# If the crypto context is lost mid-upload, it can be recovered via list_parts.
for i in range(3):
    result = bucket.upload_part(multi_key, upload_id, i+1, multi_content[i], upload_context=context)
    parts.append(oss2.models.PartInfo(i+1, result.etag, size = part_size, part_crc = result.crc))

# Complete the multipart upload.
result = bucket.complete_multipart_upload(multi_key, upload_id, parts)

# Download the whole object back.
result = bucket.get_object(multi_key)

# Verify all three parts round-tripped.
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got[0:102400] == part_a
assert content_got[102400:204800] == part_b
assert content_got[204800:307200] == part_c

+ 70
- 0
test/ossdemo/examples/object_extra.py View File

@@ -0,0 +1,70 @@
# -*- coding: utf-8 -*-

import os
from datetime import datetime

import oss2


# Demonstrates advanced object features: non-ASCII names, custom metadata,
# object copy, and append upload.


# Read AccessKeyId/AccessKeySecret/Endpoint from environment variables,
# or replace the '<...>' placeholders below with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure every parameter above has been filled in (no '<...>' left).
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)


# Object names, prefixes etc. are plain str (Python 2 bytes / Python 3
# unicode). Content should be bytes; a provided unicode str is encoded
# to UTF-8 by the SDK.
bucket.put_object('中文文件名.txt', '中文内容')


# Upload with custom metadata: HTTP headers prefixed with x-oss-meta-.
result = bucket.put_object('quote.txt', "Anything you're good at contributes to happiness.",
                           headers={'x-oss-meta-author': 'Russell'})

# Nearly every result is a RequestResult subclass carrying diagnostics;
# the request id is what Aliyun support asks for when filing a ticket.
print('http-status={0} request-id={1}'.format(result.status, result.request_id))


# Update the custom metadata.
bucket.update_object_meta('quote.txt', {'x-oss-meta-author': 'Bertrand Russell'})

# Read the custom metadata back.
result = bucket.head_object('quote.txt')
assert result.headers['x-oss-meta-author'] == 'Bertrand Russell'

# head_object also exposes content length and last-modified time.
print(result.content_length)
print(datetime.fromtimestamp(result.last_modified))


# Copy an object (suitable for small objects): quote.txt -> quote-backup.txt.
bucket.copy_object(bucket.bucket_name, 'quote.txt', 'quote-backup.txt')


# Appendable objects suit log-style data.
# Delete any previous copy first (no error if it does not exist).
bucket.delete_object('logging.txt')

# Create the appendable object; the first write starts at position 0.
result = bucket.append_object('logging.txt', 0, 'Hello OSS!\n')

# Append another line; the next position comes from the previous result.
# (head_object() could supply the current length too, but less efficiently.)
bucket.append_object('logging.txt', result.next_position, 'Hello Guys!\n')


+ 140
- 0
test/ossdemo/examples/object_forbid_overwrite.py View File

@@ -0,0 +1,140 @@

import os
import oss2
from oss2 import SizedFileAdapter, determine_part_size
from oss2.models import PartInfo

# Demonstrates the x-oss-forbid-overwrite header: when set to "true", an
# upload/copy fails if an object with the same name already exists; when
# absent or "false", the existing object is silently overwritten.
#
# Access information comes from environment variables, or replace the
# sample '<...>' values below. Example endpoints (Hangzhou region):
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# --- Simple upload without overwriting a same-named object ---
headers = dict()
headers["x-oss-forbid-overwrite"] = "true"
result = bucket.put_object('<yourObjectName>', 'content of object', headers=headers)

# HTTP status code.
print('http status: {0}'.format(result.status))
# Unique request ID - recommended for program logs.
print('request_id: {0}'.format(result.request_id))
# ETag returned by put_object.
print('ETag: {0}'.format(result.etag))
# An HTTP response header.
print('date: {0}'.format(result.headers['date']))


# --- Copy a small object without overwriting a same-named destination ---
headers = dict()
headers["x-oss-forbid-overwrite"] = "true"
bucket.copy_object('<yourSourceBucketName>', '<yourSourceObjectName>', '<yourDestinationObjectName>', headers=headers)


# --- Multipart COPY of a large object, overwrite forbidden ---
src_object = '<yourSourceObjectName>'
dst_object = '<yourDestinationObjectName>'

total_size = bucket.head_object(src_object).content_length
part_size = determine_part_size(total_size, preferred_size=100 * 1024)

# Initiate the multipart copy with overwrite forbidden.
headers = dict()
headers["x-oss-forbid-overwrite"] = "true"
upload_id = bucket.init_multipart_upload(dst_object, headers=headers).upload_id
parts = []

# Copy each part sequentially.
part_number = 1
offset = 0
while offset < total_size:
    num_to_upload = min(part_size, total_size - offset)
    byte_range = (offset, offset + num_to_upload - 1)

    result = bucket.upload_part_copy(bucket.bucket_name, src_object, byte_range,dst_object, upload_id, part_number)
    parts.append(PartInfo(part_number, result.etag))

    offset += num_to_upload
    part_number += 1

# Complete the multipart copy, again with overwrite forbidden.
headers = dict()
headers["x-oss-forbid-overwrite"] = "true"
bucket.complete_multipart_upload(dst_object, upload_id, parts, headers=headers)


# --- Multipart UPLOAD of a local file, overwrite forbidden ---
key = '<yourObjectName>'
filename = '<yourLocalFile>'

total_size = os.path.getsize(filename)
# determine_part_size picks a suitable size for each part.
part_size = determine_part_size(total_size, preferred_size=100 * 1024)

# Initiate the multipart upload with overwrite forbidden.
# NOTE(review): unlike the sections above, `headers` is not re-created
# here - the dict from the previous section is reused (same single key,
# so the request is unaffected).
headers["x-oss-forbid-overwrite"] = "true"
upload_id = bucket.init_multipart_upload(key, headers=headers).upload_id
parts = []

# Upload each part sequentially.
with open(filename, 'rb') as fileobj:
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        # SizedFileAdapter(fileobj, size) wraps the file so exactly
        # `size` bytes are read for this part.
        result = bucket.upload_part(key, upload_id, part_number,
                                    SizedFileAdapter(fileobj, num_to_upload))
        parts.append(PartInfo(part_number, result.etag))

        offset += num_to_upload
        part_number += 1

# Complete the multipart upload, again with overwrite forbidden.
headers["x-oss-forbid-overwrite"] = "true"
bucket.complete_multipart_upload(key, upload_id, parts, headers=headers)

# Verify the multipart upload round-tripped.
with open(filename, 'rb') as fileobj:
    assert bucket.get_object(key).read() == fileobj.read()

+ 41
- 0
test/ossdemo/examples/object_operation.py View File

@@ -0,0 +1,41 @@

import os
import oss2

# Demonstrates object existence checks and per-object ACLs.
#
# Access information comes from environment variables, or replace the
# sample '<...>' values below. Example endpoints (Hangzhou region):
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Determine whether an object exists.
# Pass the full object key (without the bucket name).
exist = bucket.object_exists('exampleobject.txt')
# True: the object exists; False: it does not.
if exist:
    print('object exist')
else:
    print('object not exist')


# Set the ACL of an object.
# NOTE(review): the space in 'oss2. OBJECT_ACL_PUBLIC_READ' is legal
# Python but looks like a typo - tidy to 'oss2.OBJECT_ACL_PUBLIC_READ'.
bucket.put_object_acl('<yourObjectName>', oss2. OBJECT_ACL_PUBLIC_READ)

# Read the ACL of an object back.
print(bucket.get_object_acl('<yourObjectName>').acl)

+ 197
- 0
test/ossdemo/examples/object_post.py View File

@@ -0,0 +1,197 @@
# -*- coding: utf-8 -*-

import time
import datetime
import json
import base64
import hmac
import hashlib
import os
import crcmod
import requests


# The following code demonstrates the use of PostObject.
# PostObject does not depend on the OSS Python SDK.

# For details of the POST form fields, see RFC 2388: https://tools.ietf.org/html/rfc2388
# PostObject reference: https://help.aliyun.com/document_detail/31988.html
# PostObject troubleshooting: https://yq.aliyun.com/articles/58524

# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Values come from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure all of the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

def calculate_crc64(data):
    """Compute the CRC-64 (ECMA-182) checksum of the data.

    (The original docstring said "MD5"; the implementation actually computes
    the CRC-64 used by the OSS x-oss-hash-crc64ecma response header.)

    :param data: the data to checksum (bytes expected by crcmod)
    :return: the CRC-64 value of the data as an integer
    """
    # Polynomial and final-XOR value for the ECMA-182 CRC-64 variant.
    _POLY = 0x142F0E1EBA9EA3693
    _XOROUT = 0XFFFFFFFFFFFFFFFF

    crc64 = crcmod.Crc(_POLY, initCrc=0, xorOut=_XOROUT)
    crc64.update(data)

    return crc64.crcValue

def build_gmt_expired_time(expire_time):
    """Build the policy expiration time in ISO 8601 UTC ("GMT") format.

    :param int expire_time: validity period in seconds from now
    :return str: expiration time, e.g. '2024-01-01T00:00:00Z'
    """
    now = int(time.time())
    expire_syncpoint = now + expire_time

    # Bug fix: the original used datetime.fromtimestamp(), which converts to
    # *local* time, yet still appended the 'Z' (UTC) designator — producing a
    # policy expiration wrong by the machine's UTC offset. Use UTC explicitly.
    expire_gmt = datetime.datetime.utcfromtimestamp(expire_syncpoint).isoformat()
    expire_gmt += 'Z'

    return expire_gmt

def build_encode_policy(expired_time, condition_list):
    """Build the base64-encoded POST policy.

    :param int expired_time: validity period in seconds from now
    :param list condition_list: list of policy restriction conditions
    :return str: base64-encoded policy document
    """
    policy_dict = {}
    policy_dict['expiration'] = build_gmt_expired_time(expired_time)
    policy_dict['conditions'] = condition_list

    policy = json.dumps(policy_dict).strip()
    # Bug fix: base64.b64encode() requires bytes under Python 3; encode the
    # JSON text first, and decode the result so it can be used directly as a
    # form-field value and as input to build_signature().
    policy_encode = base64.b64encode(policy.encode('utf-8')).decode('utf-8')

    return policy_encode

def build_signature(access_key_secret, encode_policy):
    """Sign the base64-encoded policy with HMAC-SHA1.

    :param str access_key_secret: AccessKeySecret used as the HMAC key
    :param str encode_policy: base64-encoded policy to sign
    :return str: base64-encoded request signature
    """
    # Bug fix: hmac.new() requires bytes under Python 3; accept str transparently.
    key = access_key_secret.encode('utf-8') if isinstance(access_key_secret, str) else access_key_secret
    msg = encode_policy.encode('utf-8') if isinstance(encode_policy, str) else encode_policy

    h = hmac.new(key, msg, hashlib.sha1)
    # Bug fix: base64.encodestring() was removed in Python 3.9; b64encode() is
    # the equivalent (and emits no embedded newlines). Decode to str for use
    # as a form-field value.
    signature = base64.b64encode(h.digest()).decode('utf-8').strip()
    return signature

def bulid_callback(cb_url, cb_body, cb_body_type=None, cb_host=None):
    """Build the base64-encoded upload-callback parameter.

    (The function name keeps the original "bulid" typo so existing callers
    keep working. The original docstring also had the cb_body/cb_body_type
    descriptions swapped; corrected below.)

    :param str cb_url: callback server URL; OSS POSTs there after a successful upload
    :param str cb_body: body of the callback request
    :param str cb_body_type: Content-Type of the callback request,
        defaults to application/x-www-form-urlencoded
    :param str cb_host: value of the Host header of the callback request
    :return str: base64-encoded callback JSON
    """
    callback_dict = {}

    callback_dict['callbackUrl'] = cb_url

    callback_dict['callbackBody'] = cb_body
    if cb_body_type is None:
        callback_dict['callbackBodyType'] = 'application/x-www-form-urlencoded'
    else:
        callback_dict['callbackBodyType'] = cb_body_type

    if cb_host is not None:
        callback_dict['callbackHost'] = cb_host

    callback_param = json.dumps(callback_dict).strip()
    # Bug fix: base64.b64encode() requires bytes under Python 3; encode the
    # JSON first and decode the result to str. (Also dropped a stray ';'.)
    base64_callback = base64.b64encode(callback_param.encode('utf-8')).decode('utf-8')

    return base64_callback

def build_post_url(endpoint, bucket_name):
    """Build the POST request URL by prefixing the bucket name to the endpoint.

    :param str endpoint: endpoint, with or without an http(s):// scheme;
        a bare host defaults to http
    :param str bucket_name: bucket name
    :return str: POST request URL
    """
    for scheme in ('http://', 'https://'):
        if endpoint.startswith(scheme):
            return endpoint.replace(scheme, '{0}{1}.'.format(scheme, bucket_name))
    return 'http://{0}.{1}'.format(bucket_name, endpoint)

def build_post_body(field_dict, boundary):
    """Build the multipart/form-data POST request body.

    :param dict field_dict: POST form fields; must contain 'key', 'content'
        and 'content-type' entries describing the file part
    :param str boundary: multipart boundary string
    :return str: POST request body
    """
    parts = []

    # Encode the ordinary form fields.
    # Bug fixes: dict.items() replaces the Python-2-only iteritems(), and
    # accumulating str (instead of starting from b'') fixes the bytes+str
    # TypeError the original raised under Python 3.
    for name, value in field_dict.items():
        if name != 'content' and name != 'content-type':
            parts.append('--{0}\r\nContent-Disposition: form-data; name="{1}"\r\n\r\n{2}\r\n'.format(boundary, name, value))

    # The uploaded file's content must be the last form field.
    parts.append('--{0}\r\nContent-Disposition: form-data; name="file"; filename="{1}"\r\nContent-Type: {2}\r\n\r\n{3}'.format(
        boundary, field_dict['key'], field_dict['content-type'], field_dict['content']))

    # Closing boundary marker.
    parts.append('\r\n--{0}--\r\n'.format(boundary))

    return ''.join(parts)

def build_post_headers(body_len, boundary, headers=None):
    """Build the POST request headers.

    :param body_len: length of the POST request body
    :param str boundary: multipart boundary string
    :param dict headers: existing headers to extend; a new dict when omitted
    :return dict: request headers
    """
    result = headers or {}
    result['Content-Length'] = str(body_len)
    result['Content-Type'] = 'multipart/form-data; boundary={0}'.format(boundary)
    return result


# POST form fields; note that field names are case-sensitive.
field_dict = {}
# Object key (name).
field_dict['key'] = 'post.txt'
# access key id
field_dict['OSSAccessKeyId'] = access_key_id
# Policy: expiration time in seconds plus the restriction conditions.
field_dict['policy'] = build_encode_policy(120, [['eq','$bucket', bucket_name],
                                                 ['content-length-range', 0, 104857600]])
# Request signature.
field_dict['Signature'] = build_signature(access_key_secret, field_dict['policy'])
# STS security token; required when using temporary credentials, otherwise
# leave empty or omit the field.
field_dict['x-oss-security-token'] = ''
# Content-Disposition
field_dict['Content-Disposition'] = 'attachment;filename=download.txt'
# User-defined metadata.
field_dict['x-oss-meta-uuid'] = 'uuid-xxx'
# callback; omit this field when no callback is needed.
field_dict['callback'] = bulid_callback('http://oss-demo.aliyuncs.com:23450',
                                        'filename=${object}&size=${size}&mimeType=${mimeType}',
                                        'application/x-www-form-urlencoded')
# Custom variables used in the callback; omit when no callback is needed.
field_dict['x:var1'] = 'callback-var1-val'
# Content of the file to upload.
field_dict['content'] = 'a'*64
# Content type of the file to upload.
field_dict['content-type'] = 'text/plain'

# Multipart boundary string, normally a random string.
boundary = '9431149156168'

# Send the POST request.
body = build_post_body(field_dict, boundary)
headers = build_post_headers(len(body), boundary)

resp = requests.post(build_post_url(endpoint, bucket_name),
                     data=body,
                     headers=headers)

# Check the result of the request.
# NOTE(review): under Python 3, resp.content is bytes while '{"Status":"OK"}'
# is str, and calculate_crc64 is fed a str — this script was written in
# Python 2 style; confirm the runtime before relying on these asserts.
assert resp.status_code == 200
assert resp.content == '{"Status":"OK"}'
assert resp.headers['x-oss-hash-crc64ecma'] == str(calculate_crc64(field_dict['content']))

+ 132
- 0
test/ossdemo/examples/object_progress.py View File

@@ -0,0 +1,132 @@
# -*- coding: utf-8 -*-

import os
import sys
import tempfile

import oss2

# The following code demonstrates the progress-bar feature, covering both
# upload and download progress bars.


# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Values come from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure all of the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

def percentage(consumed_bytes, total_bytes):
    """Progress callback: print the percentage completed so far.

    :param consumed_bytes: bytes uploaded/downloaded so far
    :param total_bytes: total bytes; when falsy (unknown) nothing is printed
    """
    if total_bytes:
        rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
        # Bug fix: print() appends '\n' by default, which defeats the '\r'
        # carriage-return trick; suppress the newline so the progress line
        # is rewritten in place.
        print('\r{0}% '.format(rate), end='')
        sys.stdout.flush()

def _prepare_temp_file(content):
"""创建临时文件
:param content: 文件内容
:return 文件名
"""
fd, pathname = tempfile.mkstemp(suffix='exam-progress-')
os.write(fd, content)
os.close(fd)
return pathname

key = 'story.txt'
# NOTE(review): content is a str here but is compared against the bytes read
# back from OSS below — this script was written in Python 2 style; confirm
# the runtime before relying on the asserts.
content = 'a' * 1024 * 1024

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

"""
流式上传
"""
# Streaming upload: overwrite upload with a progress bar.
bucket.put_object(key, content, progress_callback=percentage)

# Delete the uploaded file.
bucket.delete_object(key)

"""
追加上传
"""
# Append upload with a progress bar; each append gets its own progress bar.
# Create the appendable object; the initial offset (position) is 0.
result = bucket.append_object(key, 0, content, progress_callback=percentage)
# Append more data; the offset comes from the previous response.
# It could also be obtained via head_object(), but that is less efficient.
bucket.append_object(key, result.next_position, content, progress_callback=percentage)

# Delete the uploaded file.
bucket.delete_object(key)

"""
分片上传
"""
# Multipart upload with a progress bar; each part gets its own progress bar.
parts = []
upload_id = bucket.init_multipart_upload(key).upload_id

# Upload the parts.
for i in range(3):
    result = bucket.upload_part(key, upload_id, i+1, content, progress_callback=percentage)
    parts.append(oss2.models.PartInfo(i+1, result.etag, size = len(content), part_crc = result.crc))

# Complete the multipart upload.
result = bucket.complete_multipart_upload(key, upload_id, parts)

"""
断点续传上传
"""
# Resumable upload with a progress bar.
pathname = _prepare_temp_file(content)
oss2.resumable_upload(bucket, key, pathname,
                      multipart_threshold=200*1024,
                      part_size=100*1024,
                      num_threads=3,
                      progress_callback=percentage)

"""
文件下载
"""
# Download with a progress bar.
result = bucket.get_object(key, progress_callback=percentage)
content_got = b''
for chunk in result:
    content_got += chunk
assert content == content_got

"""
范围下载
"""
# Range download with a progress bar.
result = bucket.get_object(key, byte_range=(1024, 2047), progress_callback=percentage)
content_got = b''
for chunk in result:
    content_got += chunk
assert 'a'*1024 == content_got

"""
断点续传下载
"""
# Resumable download with a progress bar.
filename = 'download.txt'
oss2.resumable_download(bucket, key, filename,
                        multiget_threshold=200*1024,
                        part_size=100*1024,
                        num_threads=3,
                        progress_callback=percentage)
os.remove(filename)

# Delete the uploaded file.
bucket.delete_object(key)

+ 36
- 0
test/ossdemo/examples/object_request_payment.py View File

@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-

import os
import oss2
from oss2.headers import OSS_REQUEST_PAYER

# This example shows a third party (the requester) paying for requests made
# on an object. Before this works, the bucket owner must grant the third
# party access and enable pay-by-requester mode on the bucket.

# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Values come from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你要请求的Bucket名称>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure all of the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

object_name = 'test-request-object'

# Indicate requester-pays in the header.
headers = dict()
headers[OSS_REQUEST_PAYER] = "requester"

# Upload the file; the requester-pays header must be supplied.
result = bucket.put_object(object_name, 'test-content', headers=headers)
print('http response status: ', result.status)

# Delete the file; the requester-pays header must be supplied.
# (Fixed: removed a stray trailing semicolon.)
result = bucket.delete_object(object_name, headers=headers)
print('http response status: ', result.status)

+ 51
- 0
test/ossdemo/examples/object_restore.py View File

@@ -0,0 +1,51 @@

import os
import oss2
from oss2.models import (RestoreJobParameters,
RestoreConfiguration,
RESTORE_TIER_EXPEDITED,
RESTORE_TIER_STANDARD,
RESTORE_TIER_BULK)

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

object_name = "<yourObjectName>"
# Restore archived objects
bucket.restore_object(object_name)

# Restore cold archived objects
# Refer to the following code if you set the storage class of the object to upload to Cold Archive.
# bucket.put_object(object_name, '<yourContent>', headers={"x-oss-storage-class": oss2.BUCKET_STORAGE_CLASS_COLD_ARCHIVE})

# Configure the restore mode of the cold archived object.
# RESTORE_TIER_EXPEDITED: The object is restored within one hour.
# RESTORE_TIER_STANDARD: The object is restored within two to five hours.
# RESTORE_TIER_BULK: The object is restored within five to twelve hours.
job_parameters = RestoreJobParameters(RESTORE_TIER_STANDARD)

# Configure parameters. For example, set the restore mode of the object to Standard and set the duration for which the object can remain in the restored state to two days.
# The days parameter indicates the duration for which the object can remain in the restored state. The default value is one day. This parameter applies to archived objects and cold archived objects.
# The job_parameters parameter indicates the restore mode of the object. This parameter applies only to cold archived objects.
restore_config= RestoreConfiguration(days=2, job_parameters=job_parameters)

# Initiate a restore request.
bucket.restore_object(object_name, input=restore_config)

+ 60
- 0
test/ossdemo/examples/object_server_crypto.py View File

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-

import os
import shutil

import oss2
from oss2.headers import RequestHeader


# The following code demonstrates operations using server-side encryption.


# First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
# Values come from environment variables, or replace placeholders such as
# "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the endpoint can be:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for access over HTTP and HTTPS respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure all of the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


key = 'server-crypto.txt'
content = b'a' * 1024 * 1024

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)


# Upload the file with server-side AES256 encryption.
myHeader = RequestHeader()
myHeader.set_server_side_encryption("AES256")
bucket.put_object(key, content, headers = myHeader)

# Download the file and verify its content.
result = bucket.get_object(key)
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content

# Upload the file with server-side KMS encryption.
myHeader = RequestHeader()
myHeader.set_server_side_encryption("KMS", cmk_id = "11111")
bucket.put_object(key, content, headers = myHeader)

# Download the file and verify its content.
result = bucket.get_object(key)
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content

+ 55
- 0
test/ossdemo/examples/object_storage_type.py View File

@@ -0,0 +1,55 @@

import os
import oss2
import time

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

object_name = '<yourObjectName>'

# The following code provides an example of how to convert the storage class of an object from Standard or IA to Archive:
# Add a header that specifies the storage class. Set the storage class to Archive.
headers = {'x-oss-storage-class': oss2.BUCKET_STORAGE_CLASS_ARCHIVE}

# Modify the storage class of the object.
bucket.copy_object(bucket.bucket_name, object_name, object_name, headers)

# The following code provides an example of how to convert the storage class of an object from Archive to IA:
# Obtain the object metadata.
meta = bucket.head_object(object_name)

# Check whether the storage class of the object is Archive. If it is, the object
# must be restored before its storage class can be modified; poll until done.
if meta.resp.headers['x-oss-storage-class'] == oss2.BUCKET_STORAGE_CLASS_ARCHIVE:
    bucket.restore_object(object_name)
    # NOTE(review): this assumes the 'x-oss-restore' header is present on every
    # HEAD response while restoring — a missing header would raise KeyError; confirm.
    while True:
        meta = bucket.head_object(object_name)
        if meta.resp.headers['x-oss-restore'] == 'ongoing-request="true"':
            time.sleep(5)
        else:
            break

# Add a header that specifies the storage class. Set the storage class to IA.
headers = {'x-oss-storage-class': oss2.BUCKET_STORAGE_CLASS_IA}

# Modify the storage class of the object.
bucket.copy_object(bucket.bucket_name, object_name, object_name, headers)

+ 463
- 0
test/ossdemo/examples/object_tagging.py View File

@@ -0,0 +1,463 @@

import os
import oss2
import datetime
from oss2.headers import OSS_OBJECT_TAGGING, OSS_OBJECT_TAGGING_COPY_DIRECTIVE
from oss2 import SizedFileAdapter, determine_part_size
from oss2.headers import OSS_OBJECT_TAGGING
from oss2.models import (LifecycleExpiration, LifecycleRule,
BucketLifecycle, AbortMultipartUpload,
TaggingRule, Tagging, StorageTransition, PartInfo)

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from evironment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, 'Please set parameters:' + param


# Create a bucket. You can use the bucket to call all object-related operations
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# The following code provides an example on how to add tags to an object when you upload the object by using simple upload:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# If tags contain characters, you must encode the keys and values of the tags by using URL encoding.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Configure the tags in the HTTP headers.
headers = dict()
headers[OSS_OBJECT_TAGGING] = tagging

# Specify the headers when you call the put_object operation so that the tags are added to the object when it is uploaded.
result = bucket.put_object(object_name, 'content', headers=headers)
print('http response status: ', result.status)

# Query the tags added to the object.
result = bucket.get_object_tagging(object_name)
for key in result.tag_set.tagging_rule:
print('tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to add tags to an object when you upload the object by using multipart upload:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'
# Specify the full path of the local file that you want to upload. Example: D:\\localpath\\examplefile.txt.
# By default, if you specify only the name of the local file such as examplefile.txt without specifying the local path, the local file is uploaded from the path of the project to which the sample program belongs.
filename = 'D:\\localpath\\examplefile.txt'

total_size = os.path.getsize(filename)
# Use the determine_part_size method to determine the size of each part.
part_size = determine_part_size(total_size, preferred_size=100 * 1024)

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# If tags contain characters, you must encode the keys and values of the tags by using URL encoding.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Configure the tags in the HTTP headers.
headers = dict()
headers[OSS_OBJECT_TAGGING] = tagging

# Initiate a multipart upload task.
# Specify the headers when you call the init_multipart_upload operation so that the tags are added to the object to upload.
upload_id = bucket.init_multipart_upload(object_name, headers=headers).upload_id
parts = []

# Upload the parts one by one.
with open(filename, 'rb') as fileobj:
part_number = 1
offset = 0
while offset < total_size:
num_to_upload = min(part_size, total_size - offset)
# The SizedFileAdapter(fileobj, size) method generates a new object and recalculates the position from which the append operation starts.
result = bucket.upload_part(object_name, upload_id, part_number,
SizedFileAdapter(fileobj, num_to_upload))
parts.append(PartInfo(part_number, result.etag))

offset += num_to_upload
part_number += 1

# Complete the multipart upload task.
result = bucket.complete_multipart_upload(object_name, upload_id, parts)
print('http response status: ', result.status)

# Query the tags added to the object.
result = bucket.get_object_tagging(object_name)
for key in result.tag_set.tagging_rule:
print('tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))

# Verify the result of the multipart upload task.
with open(filename, 'rb') as fileobj:
assert bucket.get_object(object_name).read() == fileobj.read()


# The following code provides an example on how to add tags to an object when you upload the object by using append upload:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# If tags contain characters, you must encode the keys and values of the tags by using URL encoding.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Configure the tags in the HTTP headers.
headers = dict()
headers[OSS_OBJECT_TAGGING] = tagging

# Append the object. Specify the headers when you call the append_object operation so that the tags are added to the object.
# Only the tags configured the first time the object is appended are added to the object.
result = bucket.append_object(object_name, 0, '<yourContent>', headers=headers)

# Query the tags added to the object.
result = bucket.get_object_tagging(object_name)
for key in result.tag_set.tagging_rule:
print('tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to add tags to an object when you upload the object by using resumable upload:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'
# Specify the full path of the local file. By default, if you do not specify the path of the local file, the local file is uploaded from the path of the project to which the sample program belongs.
local_file = 'D:\\localpath\\examplefile.txt'

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# If tags contain characters, you must encode the keys and values of the tags by using URL encoding.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Configure the tags in the HTTP headers.
headers = dict()
headers[OSS_OBJECT_TAGGING] = tagging

# When the object length is greater than or equal to the value of the multipart_threshold parameter, multipart upload is used. The multipart_threshold parameter is optional. The default value of multipart_threshold is 10 MB. If you do not specify a directory by using the store parameter, the .py-oss-upload directory is created in the HOME directory to store the checkpoint information.
# Specify the headers when you call the resumable_upload operation so that the tags are added to the object to be uploaded.
oss2.resumable_upload(bucket, object_name, local_file, headers=headers)

result = bucket.get_object_tagging(object_name)
for key in result.tag_set.tagging_rule:
print('object tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to add tags to or modify the tags of an existing object:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'

# Create a tagging rule.
rule = TaggingRule()
rule.add('key1', 'value1')
rule.add('key2', 'value2')

# Create a tag.
tagging = Tagging(rule)

# Add the tag to the object.
result = bucket.put_object_tagging(object_name, tagging)
# Query the HTTP status code.
print('http response status:', result.status)


# The following code provides an example on how to add tags to a specified version of an object or modify the tags of the object:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'
# Specify the version ID of the object. Example: CAEQMxiBgICAof2D0BYiIDJhMGE3N2M1YTI1NDQzOGY5NTkyNTI3MGYyMzJm****.
version_id = 'CAEQMxiBgICAof2D0BYiIDJhMGE3N2M1YTI1NDQzOGY5NTkyNTI3MGYyMzJm****'

tagging = Tagging()
# Specify the key and value of the object tag. Example: the key is owner, and the value is John.
tagging.tag_set.add('owner', 'John')
tagging.tag_set.add('type', 'document')

params = dict()
params['versionId'] = version_id

bucket.put_object_tagging(object_name, tagging, params=params)


# The following code provides an example on how to add tags to an object smaller than 1 GB when you copy it by calling CopyObject:
# Specify the full path of the source object. Example: srcexampledir/exampleobject.txt.
src_object_name = 'srcexampledir/exampleobject.txt'
# Specify the full path of the destination object. Example: destexampledir1/exampleobject.txt.
dest_object_name1 = 'destexampledir1/exampleobject.txt'
# Specify the full path of the destination object. Example: destexampledir2/exampleobject.txt.
dest_object_name2 = 'destexampledir2/exampleobject.txt'

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# If tags contain characters, you must encode the keys and values of the tags by using URL encoding.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Set OSS_OBJECT_TAGGING_COPY_DIRECTIVE to COPY or keep the default value in the HTTP headers, so that the tags of the source object are added to the dest_object_name1 object.
headers=dict()
headers[OSS_OBJECT_TAGGING_COPY_DIRECTIVE] = 'COPY'
bucket.copy_object(bucket.bucket_name, src_object_name, dest_object_name1, headers=headers)

# Set OSS_OBJECT_TAGGING_COPY_DIRECTIVE to REPLACE in the HTTP headers, so that the tags specified in OSS_OBJECT_TAGGING are added to the dest_object_name2 object.
headers[OSS_OBJECT_TAGGING_COPY_DIRECTIVE] = 'REPLACE'
headers[OSS_OBJECT_TAGGING] = tagging
bucket.copy_object(bucket.bucket_name, src_object_name, dest_object_name2, headers=headers)

# Query the tags added to the src_object_name object.
result = bucket.get_object_tagging(src_object_name)
for key in result.tag_set.tagging_rule:
print('src tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))

# Query the tags added to the dest_object_name1 object. The tags added to the dest_object_name1 object are the same as those of the src_object_name object.
result = bucket.get_object_tagging(dest_object_name1)
for key in result.tag_set.tagging_rule:
print('dest1 object tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))

# Query the tags added to the dest_object_name2 object. The tags added to the dest_object_name2 object are those specified in headers[OSS_OBJECT_TAGGING].
result = bucket.get_object_tagging(dest_object_name2)
for key in result.tag_set.tagging_rule:
print('dest2 object tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to add tags to an object larger than 1 GB when you copy it by calling MultipartUpload:
# Specify the full path of the source object. Example: srcexampledir/exampleobject.txt.
src_object_name = 'srcexampledir/exampleobject.txt'
# Specify the full path of the destination object. Example: destexampledir/exampleobject.txt.
dest_object_name = 'destexampledir/exampleobject.txt'

# Obtain the size of the source object.
head_info = bucket.head_object(src_object_name)
total_size = head_info.content_length
print('src object size:', total_size)

# Use the determine_part_size method to determine the size of each part.
part_size = determine_part_size(total_size, preferred_size=100 * 1024)
print('part_size:', part_size)

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# Tag keys and values that contain reserved characters must be URL-encoded.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Configure the tags in the HTTP headers.
headers = dict()
headers[OSS_OBJECT_TAGGING] = tagging

# Initiate a multipart copy task.
# Specify the headers when you call the init_multipart_upload operation so that the tags are added to the destination object.
upload_id = bucket.init_multipart_upload(dest_object_name, headers=headers).upload_id
parts = []

# Copy the parts one by one.
part_number = 1
offset = 0
while offset < total_size:
    num_to_upload = min(part_size, total_size - offset)
    # upload_part_copy takes an inclusive byte range, hence the -1.
    # (Fixed: removed a stray C-style semicolon from the original line.)
    end = offset + num_to_upload - 1
    result = bucket.upload_part_copy(bucket.bucket_name, src_object_name, (offset, end), dest_object_name, upload_id, part_number)
    # Save the part information.
    parts.append(PartInfo(part_number, result.etag))

    offset += num_to_upload
    part_number += 1

# Complete the multipart upload task.
result = bucket.complete_multipart_upload(dest_object_name, upload_id, parts)

# Obtain the metadata of the destination object.
head_info = bucket.head_object(dest_object_name)

# Query the size of the destination object.
dest_object_size = head_info.content_length
print('dest object size:', dest_object_size)

# Compare the size of the destination object with that of the source object.
assert dest_object_size == total_size

# Query the tags of the source object.
result = bucket.get_object_tagging(src_object_name)
for key in result.tag_set.tagging_rule:
    print('src tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))

# Query the tags added to the destination object.
result = bucket.get_object_tagging(dest_object_name)
for key in result.tag_set.tagging_rule:
    print('dest tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to add tags to a symbolic link:
# Specify the full path of the destination object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'
# Specify the full path of the symbolic link object. Example: shortcut/myobject.txt.
symlink_name = 'shortcut/myobject.txt'

# Configure the tagging string.
tagging = "k1=v1&k2=v2&k3=v3"

# If tags contain reserved characters, you must encode the keys and values of the tags by using URL encoding.
k4 = "k4+-="
v4 = "+-=._:/"
tagging += "&" + oss2.urlquote(k4) + "=" + oss2.urlquote(v4)

# Configure the tags in the HTTP headers.
headers = dict()
headers[OSS_OBJECT_TAGGING] = tagging

# Add a symbolic link to the object.
# Specify the headers when you call the put_symlink operation so that the tags are added to the symbolic link.
result = bucket.put_symlink(object_name, symlink_name, headers=headers)
print('http response status: ', result.status)

# Query the tags added to the symbolic link.
result = bucket.get_object_tagging(symlink_name)
for key in result.tag_set.tagging_rule:
    print('tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to query the tags of the exampleobject.txt object in the exampledir directory of the examplebucket bucket:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'

# Query the tags of the object.
result = bucket.get_object_tagging(object_name)

# View the tags of the object.
for key in result.tag_set.tagging_rule:
    print('tagging key: {}, value: {}'.format(key, result.tag_set.tagging_rule[key]))


# The following code provides an example on how to query the tags of a specified version of the exampleobject.txt object in the exampledir directory of the examplebucket bucket:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'
# Specify the version ID of the object. Example: CAEQMxiBgICAof2D0BYiIDJhMGE3N2M1YTI1NDQzOGY5NTkyNTI3MGYyMzJm****.
version_id = 'CAEQMxiBgICAof2D0BYiIDJhMGE3N2M1YTI1NDQzOGY5NTkyNTI3MGYyMzJm****'

# Pass the version ID via request parameters.
params = dict()
params['versionId'] = version_id

result = bucket.get_object_tagging(object_name, params=params)
print(result)


# The following code provides an example on how to delete the tags of the exampleobject.txt object in the exampledir directory of the examplebucket bucket:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'

# Remove the tags of the object.
result = bucket.delete_object_tagging(object_name)
print('http response status: ', result.status)


# The following code provides an example on how to delete the tags of a specified version of the exampleobject.txt object in the exampledir directory of the examplebucket bucket:
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain the bucket name.
object_name = 'exampledir/exampleobject.txt'
# Specify the version ID of the object.
version_id = 'CAEQMxiBgICAof2D0BYiIDJhMGE3N2M1YTI1NDQzOGY5NTkyNTI3MGYyMzJm****'

params = dict()
params['versionId'] = version_id
bucket.delete_object_tagging(object_name, params=params)



# The following code provides an example on how to add tagging configurations to a lifecycle rule:
# Specify that objects expire three days after they are last modified.
# Set the name of the expiration rule and the prefix to match the objects.
rule1 = LifecycleRule('rule1', 'tests/',
                      # Enable the expiration rule.
                      status=LifecycleRule.ENABLED,
                      # Set the validity period to three days after the last modified date.
                      expiration=LifecycleExpiration(days=3))

# Specify that the objects last modified before the specified date expire.
# Set the name of the expiration rule and the prefix to match the objects.
rule2 = LifecycleRule('rule2', 'logging-',
                      # Disable the expiration rule.
                      status=LifecycleRule.DISABLED,
                      # Specify that the objects last modified before the specified date expire.
                      expiration=LifecycleExpiration(created_before_date=datetime.date(2018, 12, 12)))

# Specify that parts expire three days after they are last modified.
rule3 = LifecycleRule('rule3', 'tests1/',
                      status=LifecycleRule.ENABLED,
                      abort_multipart_upload=AbortMultipartUpload(days=3))

# Specify that the parts last modified before the specified date expire.
rule4 = LifecycleRule('rule4', 'logging1-',
                      status=LifecycleRule.DISABLED,
                      abort_multipart_upload=AbortMultipartUpload(created_before_date=datetime.date(2018, 12, 12)))

# Configure tags to match objects.
tagging_rule = TaggingRule()
tagging_rule.add('key1', 'value1')
tagging_rule.add('key2', 'value2')
tagging = Tagging(tagging_rule)

# Configure the rule to convert the storage class of objects. Specify that the storage class of objects is converted to Archive 365 days after the objects are last modified.
# Tags to match objects are specified in rule5. The rule applies only to objects that match tag conditions of key1=value1 and key2=value2.
rule5 = LifecycleRule('rule5', 'logging2-',
                      status=LifecycleRule.ENABLED,
                      storage_transitions=[StorageTransition(days=365, storage_class=oss2.BUCKET_STORAGE_CLASS_ARCHIVE)],
                      tagging=tagging)

lifecycle = BucketLifecycle([rule1, rule2, rule3, rule4, rule5])

bucket.put_bucket_lifecycle(lifecycle)


# The following code provides an example on how to view the tagging configurations of a lifecycle rule:
# View the lifecycle rules.
lifecycle = bucket.get_bucket_lifecycle()

for rule in lifecycle.rules:
    # View the part expiration rules.
    if rule.abort_multipart_upload is not None:
        print('id={0}, prefix={1}, tagging={2}, status={3}, days={4}, created_before_date={5}'
              .format(rule.id, rule.prefix, rule.tagging, rule.status,
                      rule.abort_multipart_upload.days,
                      rule.abort_multipart_upload.created_before_date))

    # View the object expiration rules.
    if rule.expiration is not None:
        print('id={0}, prefix={1}, tagging={2}, status={3}, days={4}, created_before_date={5}'
              .format(rule.id, rule.prefix, rule.tagging, rule.status,
                      rule.expiration.days,
                      rule.expiration.created_before_date))
    # View the rules to convert the storage class.
    if len(rule.storage_transitions) > 0:
        storage_trans_info = ''
        for storage_rule in rule.storage_transitions:
            storage_trans_info += 'days={0}, created_before_date={1}, storage_class={2} **** '.format(
                storage_rule.days, storage_rule.created_before_date, storage_rule.storage_class)

        # Fixed: the original format string contained a stray double comma ("status={3},,").
        print('id={0}, prefix={1}, tagging={2}, status={3}, StorageTransition={4}'
              .format(rule.id, rule.prefix, rule.tagging, rule.status, storage_trans_info))

+ 71
- 0
test/ossdemo/examples/qos_info.py View File

@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-

import os
import oss2
from oss2.models import BucketQosInfo

# This example demonstrates operations on the user-level and bucket-level QoSInfo.

# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are read from environment variables, or replace placeholders such as "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你要请求的Bucket名称>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# The following shows the get operation for the user qos info.
# Fetch the user qos info.
service = oss2.Service(oss2.Auth(access_key_id, access_key_secret), endpoint)
user_qos_info = service.get_user_qos_info()
print('===Get user qos info===')
print('region:', user_qos_info.region)
print('total_upload_bw:', user_qos_info.total_upload_bw)
print('intranet_upload_bw:', user_qos_info.intranet_upload_bw)
print('extranet_upload_bw:', user_qos_info.extranet_upload_bw)
print('total_download_bw:', user_qos_info.total_download_bw)
print('intranet_download_bw:', user_qos_info.intranet_download_bw)
print('extranet_download_bw:', user_qos_info.extranet_download_bw)
print('total_qps:', user_qos_info.total_qps)
print('intranet_qps:', user_qos_info.intranet_qps)
print('extranet_qps:', user_qos_info.extranet_qps)

# The following shows the put/get/delete operations for the bucket qos info.
# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# Create a BucketQosInfo object. -1 means no individual limit; bandwidth unit is Gbps.
# See the official documentation for the exact configuration rules.
bucket_qos_info = BucketQosInfo(
    total_upload_bw = -1,
    intranet_upload_bw = 2,
    extranet_upload_bw = 2,
    total_download_bw = -1,
    intranet_download_bw = -1,
    extranet_download_bw = -1,
    total_qps = -1,
    intranet_qps = -1,
    extranet_qps = -1)

# Set the bucket qos info.
result = bucket.put_bucket_qos_info(bucket_qos_info)
print('http response status:', result.status)

# Get the bucket qos info.
bucket_qos_info = bucket.get_bucket_qos_info()
print('===Get bucket qos info===')
print('total_upload_bw:', bucket_qos_info.total_upload_bw)
print('intranet_upload_bw:', bucket_qos_info.intranet_upload_bw)
print('extranet_upload_bw:', bucket_qos_info.extranet_upload_bw)
print('total_download_bw:', bucket_qos_info.total_download_bw)
print('intranet_download_bw:', bucket_qos_info.intranet_download_bw)
print('extranet_download_bw:', bucket_qos_info.extranet_download_bw)
print('total_qps:', bucket_qos_info.total_qps)
print('intranet_qps:', bucket_qos_info.intranet_qps)
print('extranet_qps:', bucket_qos_info.extranet_qps)

# Delete the bucket qos info configuration.
result = bucket.delete_bucket_qos_info()
print('http response status:', result.status)

+ 43
- 0
test/ossdemo/examples/sdk_logging.py View File

@@ -0,0 +1,43 @@

import os
import oss2
import logging
from itertools import islice

# Specify access information, such as AccessKeyId, AccessKeySecret, and Endpoint.
# You can obtain access information from environment variables or replace sample values in the code, such as <your AccessKeyId> with actual values.
#
# For example, if your bucket is located in the China (Hangzhou) region, you can set Endpoint to one of the following values:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<yourBucketName>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Make sure that all parameters are correctly configured
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set parameters:' + param


# Download log information to a local log file, and store the log file in the specified local path.
# By default, if you specify the name of a local file such as examplelogfile.log without specifying the local path, the local file is saved to the local path of the project to which the sample program belongs.
log_file_path = "D:\\localpath\\examplelogfile.log"

# Enable log recording.
oss2.set_file_logger(log_file_path, 'oss2', logging.INFO)

# Bug fix: the original built the Auth/Bucket from hard-coded placeholder strings
# ('yourAccessKeyId', 'yourEndpoint', 'examplebucket') even though the credentials
# above were already read from the environment and validated. Use those values.
auth = oss2.Auth(access_key_id, access_key_secret)
bucket = oss2.Bucket(auth, endpoint, bucket_name)

# Traverse objects and directories.
for b in islice(oss2.ObjectIterator(bucket), 10):
    print(b.key)

# Obtain the metadata of the object.
# Specify the full path of the object. Example: exampledir/exampleobject.txt. The full path of the object cannot contain bucket names.
object_meta = bucket.get_object_meta('exampledir/exampleobject.txt')

+ 53
- 0
test/ossdemo/examples/select_csv.py View File

@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-

import os
import oss2


def select_call_back(consumed_bytes, total_bytes=None):
    """Progress callback for OSS select requests.

    Prints the number of bytes consumed so far; total_bytes is accepted for
    the callback signature but not used.
    """
    message = 'Consumed Bytes:' + str(consumed_bytes) + '\n'
    print(message)
# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are read from environment variables, or replace placeholders such as "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the endpoint may be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for HTTP or HTTPS access respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')
# Make sure the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
#objects = bucket.list_objects()
key = 'python_select.csv'
content = 'Tom Hanks,USA,45\r\n'*1024
filename = 'python_select.csv'

# Upload the test file.
bucket.put_object(key, content)

csv_meta_params = {'RecordDelimiter': '\r\n'}

select_csv_params = {'CsvHeaderInfo': 'None',
                     'RecordDelimiter': '\r\n',
                     'LineRange': (500, 1000)}

csv_header = bucket.create_select_object_meta(key, csv_meta_params)
print(csv_header.csv_rows)
print(csv_header.csv_splits)

# Run a select query and stream the result chunks.
result = bucket.select_object(key, "select * from ossobject where _3 > 44 limit 100000", select_call_back, select_csv_params)
content_got = b''
for chunk in result:
    content_got += chunk
print(content_got)

# Same query, but write the result directly to a local file.
result = bucket.select_object_to_file(key, filename,
                                      "select * from ossobject where _3 > 44 limit 100000", select_call_back, select_csv_params)

bucket.delete_object(key)

+ 108
- 0
test/ossdemo/examples/server_side_encryption.py View File

@@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-

import os
import oss2
from oss2.models import ServerSideEncryptionRule
from oss2 import (SERVER_SIDE_ENCRYPTION_KMS, SERVER_SIDE_ENCRYPTION_AES256,
SERVER_SIDE_ENCRYPTION_SM4, KMS_DATA_ENCRYPTION_SM4)
from oss2.headers import (OSS_SERVER_SIDE_ENCRYPTION, OSS_SERVER_SIDE_ENCRYPTION_KEY_ID,
OSS_SERVER_SIDE_DATA_ENCRYPTION)
from oss2 import SizedFileAdapter, determine_part_size
from oss2.models import PartInfo

# This example demonstrates server-side encryption configuration.

# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are read from environment variables, or replace placeholders such as "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你要请求的Bucket名称>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# ########## Example: configure bucket-level server-side encryption ##############
# AES256 encryption is used as an example.
rule = ServerSideEncryptionRule()
rule.sse_algorithm = SERVER_SIDE_ENCRYPTION_AES256
bucket.put_bucket_encryption(rule)

# Read back the server-side encryption configuration.
result = bucket.get_bucket_encryption()
print('sse_algorithm:', result.sse_algorithm)
print('kms_key_id:', result.kms_master_keyid)
print('data_algorithm:', result.kms_data_encryption)

# ########## Example: override the encryption method per object via put_object ############
key = 'test_put_object'

# Specify the encryption method in the headers.
headers = dict()
# Encrypt with KMS.
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
# Encrypt the data with the SM4 algorithm.
headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4

# Upload with put_object, passing the per-object encryption headers.
result = bucket.put_object(key, b'123', headers=headers)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
print('sse_algorithm:', sse_algo)
print('data_algorithm:', data_algo)
print('kms_key_id:', kms_key_id)

# Read the uploaded content back.
object_stream = bucket.get_object(key)
print(object_stream.read())

# ########## Example: specify per-object encryption for a multipart upload ############
key = 'test-upload_file'
filename = '<yourLocalFile>'

total_size = os.path.getsize(filename)
# determine_part_size decides the size of each part.
part_size = determine_part_size(total_size, preferred_size=100 * 1024)

# Specify the encryption method in the headers.
headers = dict()
# Use the OSS server-side SM4 encryption algorithm.
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_SM4

# Specify the server-side encryption type when initializing the multipart upload.
result = bucket.init_multipart_upload(key, headers=headers)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
print('sse_algorithm:', sse_algo)
print('data_algorithm:', data_algo)
print('kms_key_id:', kms_key_id)

upload_id = result.upload_id
parts = []

# Upload the parts one by one.
with open(filename, 'rb') as fileobj:
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        # SizedFileAdapter(fileobj, size) wraps fileobj so only `size` bytes are readable from the current position.
        result = bucket.upload_part(key, upload_id, part_number,
                                    SizedFileAdapter(fileobj, num_to_upload))
        parts.append(PartInfo(part_number, result.etag))

        offset += num_to_upload
        part_number += 1

# Complete the multipart upload.
bucket.complete_multipart_upload(key, upload_id, parts)

# Verify the multipart upload.
with open(filename, 'rb') as fileobj:
    assert bucket.get_object(key).read() == fileobj.read()

+ 91
- 0
test/ossdemo/examples/sign_v2.py View File

@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-

import os
import oss2
import requests
import datetime
import time
import hashlib
import hmac


# The following code shows how to sign requests with the OSS V2 signature algorithm.


# First, initialize AccessKeyId, AccessKeySecret and Endpoint.
# You can set them via environment variables, or replace '<Your AccessKeyId>' etc. with real values.
#
# Taking Hangzhou (East China 1) as an example, the endpoint may be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# HTTP and HTTPS requests are handled the same way.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<Your AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<Your AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<Your Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<Your Endpoint>')


if not endpoint.startswith('http://') and not endpoint.startswith('https://'):
    endpoint = 'http://' + endpoint


# Verify that access_key_id and the other parameters are sensibly initialized.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set variable: ' + param


# Create an AuthV2 object so requests are signed with the V2 algorithm.
# Alternatively use oss2.make_auth, which defaults to the V1 algorithm.
auth = oss2.AuthV2(access_key_id, access_key_secret)
# auth = oss2.make_auth(access_key_id, access_key_secret, oss2.AUTH_VERSION_2)

# Create a Bucket used for all bucket- and object-related operations.
bucket = oss2.Bucket(auth, endpoint, bucket_name)

content = b'Never give up. - Jack Ma'

# Upload an object.
bucket.put_object('motto.txt', content)

# Download the object.
result = bucket.get_object('motto.txt')

assert result.read() == content

# Generate a signed URL that expires after 60 seconds.
url = bucket.sign_url('GET', 'motto.txt', 60)

print(url)


# Manually build a POST request signed with the V2 algorithm.
key = 'object-from-post.txt'

boundary = 'arbitraryboundaryvalue'
headers = {'Content-Type': 'multipart/form-data; boundary=' + boundary}
encoded_policy = oss2.utils.b64encode_as_string(oss2.to_bytes('{ "expiration": "%s","conditions": [["starts-with", "$key", ""]]}'
                                                              % oss2.date_to_iso8601(datetime.datetime.utcfromtimestamp(int(time.time()) + 60))))

digest = hmac.new(oss2.to_bytes(access_key_secret), oss2.to_bytes(encoded_policy), hashlib.sha256).digest()
signature = oss2.utils.b64encode_as_string(digest)

form_fields = {
    'x-oss-signature-version': 'OSS2',
    'x-oss-signature': signature,
    'x-oss-access-key-id': access_key_id,
    'policy': encoded_policy,
    'key': key,
}

# The object's content.
content = 'file content for post object request'

body = ''

for k, v in form_fields.items():
    body += '--%s\r\nContent-Disposition: form-data; name="%s"\r\n\r\n%s\r\n' % (boundary, k, v)

body += '--%s\r\nContent-Disposition: form-data; name="file"; filename="%s"\r\n\r\n%s\r\n' % (boundary, key, content)
body += '--%s\r\nContent-Disposition: form-data; name="submit"\r\n\r\nUpload to OSS\r\n--%s--\r\n' % (boundary, boundary)

p = oss2.urlparse(endpoint)
requests.post('%s://%s.%s' % (p.scheme, bucket_name, p.netloc), data=body, headers=headers)

+ 57
- 0
test/ossdemo/examples/sign_v4.py View File

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-

import os
import oss2


# The following code shows how to sign requests with the OSS V4 signature algorithm.


# First, initialize AccessKeyId, AccessKeySecret and Endpoint.
# You can set them via environment variables, or replace '<Your AccessKeyId>' etc. with real values.
#
# Taking Hangzhou (East China 1) as an example, the endpoint may be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# HTTP and HTTPS requests are handled the same way.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<Your AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<Your AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<Your Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<Your Endpoint>')
region = os.getenv('OSS_TEST_REGION', '<Your Region>')


if not endpoint.startswith('http://') and not endpoint.startswith('https://'):
    endpoint = 'http://' + endpoint


# Verify that access_key_id and the other parameters are sensibly initialized.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set variable: ' + param


# Create an AuthV4 object so requests are signed with the V4 algorithm.
# Alternatively use oss2.make_auth, which defaults to the V1 algorithm.
auth = oss2.AuthV4(access_key_id, access_key_secret)
# auth = oss2.make_auth(access_key_id, access_key_secret, oss2.AUTH_VERSION_4)

# Create a Bucket used for all bucket- and object-related operations.
bucket = oss2.Bucket(auth, endpoint, bucket_name, region=region)

service = oss2.Service(auth, endpoint, region=region)

content = b'Never give up. - Jack Ma'

# Upload an object.
bucket.put_object('motto.txt', content)

# Download the object.
result = bucket.get_object('motto.txt')

assert result.read() == content

# Generate a signed URL that expires after 60 seconds.
url = bucket.sign_url('GET', 'motto.txt', 60)

print(url)



+ 102
- 0
test/ossdemo/examples/sts.py View File

@@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-

import json
import os

from aliyunsdkcore import client
from aliyunsdksts.request.v20150401 import AssumeRoleRequest

import oss2


# This example demonstrates STS usage: assuming a role to obtain temporary
# credentials, then accessing OSS with those temporary credentials.

# For an STS primer see https://yq.aliyun.com/articles/57895
# Official STS documentation: https://help.aliyun.com/document_detail/28627.html

# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are read from environment variables, or replace placeholders such as "<你的AccessKeyId>" with real values.
# Note: AccessKeyId/AccessKeySecret are the keys of a RAM sub-user.
# RoleArn can be found in the console under "RAM > Roles > Manage > Basic Information > Arn".
#
# Taking the Hangzhou region as an example, the endpoint may be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for HTTP or HTTPS access respectively.
access_key_id = os.getenv('OSS_TEST_STS_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_STS_KEY', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')
sts_role_arn = os.getenv('OSS_TEST_STS_ARN', '<你的Role Arn>')


# Make sure the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint, sts_role_arn):
    assert '<' not in param, '请设置参数:' + param


class StsToken(object):
    """Temporary credentials returned by AssumeRole.

    Attributes:
        access_key_id (str): temporary user's access key id
        access_key_secret (str): temporary user's access key secret
        expiration (int): expiry time as UNIX seconds since 1970-01-01 UTC
        security_token (str): temporary user token
        request_id (str): request ID
    """

    def __init__(self):
        # Start with empty credentials; fetch_sts_token fills these in.
        self.access_key_id = ''
        self.access_key_secret = ''
        self.security_token = ''
        self.request_id = ''
        self.expiration = 0


def fetch_sts_token(access_key_id, access_key_secret, role_arn):
    """Assume an STS role as a RAM sub-user and obtain temporary credentials.

    :param access_key_id: the sub-user's access key id
    :param access_key_secret: the sub-user's access key secret
    :param role_arn: Arn of the STS role
    :return StsToken: temporary credentials
    """
    clt = client.AcsClient(access_key_id, access_key_secret, 'cn-hangzhou')
    req = AssumeRoleRequest.AssumeRoleRequest()

    req.set_accept_format('json')
    req.set_RoleArn(role_arn)
    req.set_RoleSessionName('oss-python-sdk-example')

    # Perform the AssumeRole call; raises on failure.
    body = clt.do_action_with_exception(req)

    j = json.loads(oss2.to_unicode(body))

    token = StsToken()

    token.access_key_id = j['Credentials']['AccessKeyId']
    token.access_key_secret = j['Credentials']['AccessKeySecret']
    token.security_token = j['Credentials']['SecurityToken']
    token.request_id = j['RequestId']
    # Parse the ISO8601 expiration timestamp into UNIX time.
    token.expiration = oss2.utils.to_unixtime(j['Credentials']['Expiration'], '%Y-%m-%dT%H:%M:%SZ')

    return token


# Create the Bucket object using the temporary STS credentials;
# all object-related operations go through it.
token = fetch_sts_token(access_key_id, access_key_secret, sts_role_arn)
auth = oss2.StsAuth(token.access_key_id, token.access_key_secret, token.security_token)
bucket = oss2.Bucket(auth, endpoint, bucket_name)


# Upload a string. The object name is motto.txt; the content is a famous quote.
bucket.put_object('motto.txt', 'Never give up. - Jack Ma')


# Download it to a local file.
bucket.get_object_to_file('motto.txt', '本地座右铭.txt')


# Delete the object named motto.txt.
bucket.delete_object('motto.txt')


# Remove the local file.
os.remove(u'本地座右铭.txt')

+ 63
- 0
test/ossdemo/examples/traffic_limit.py View File

@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-

import os
import oss2
from oss2.models import OSS_TRAFFIC_LIMIT

# This example shows how to throttle (rate-limit) uploads and downloads.

# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are read from environment variables, or replace placeholders such as "<你的AccessKeyId>" with real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你要请求的Bucket名称>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')

# Make sure the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param

# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

OBJECT_SIZE_1MB = (1 * 1024 * 1024)
# The limit value is expressed in bit/s: 100 KB/s = 100 * 1024 * 8.
LIMIT_100KB = (100 * 1024 * 8)

headers = dict()
# Fixed: removed the stray C-style semicolon from the original line.
headers[OSS_TRAFFIC_LIMIT] = str(LIMIT_100KB)

key = 'traffic-limit-test-put-object'
content = b'a' * OBJECT_SIZE_1MB

# Throttled upload.
result = bucket.put_object(key, content, headers=headers)
print('http response status:', result.status)

# Throttled download to a local file.
file_name = key + '.txt'
result = bucket.get_object_to_file(key, file_name, headers=headers)
print('http response status:', result.status)

os.remove(file_name)
bucket.delete_object(key)

# Throttled upload via a signed URL.
params = dict()
# Fixed: removed the stray C-style semicolon from the original line.
params[OSS_TRAFFIC_LIMIT] = str(LIMIT_100KB)
local_file_name = "example.jpg"

# Create a signed URL for the throttled upload, valid for 60 seconds.
url = bucket.sign_url('PUT', key, 60, params=params)
# Throttled upload.
result = bucket.put_object_with_url_from_file(url, local_file_name)
print('http response status:', result.status)

# Create a signed URL for the throttled download, valid for 60 seconds.
down_file_name = key + '.tmp'
url = bucket.sign_url('GET', key, 60, params=params)
# Throttled download.
result = bucket.get_object_with_url_to_file(url, down_file_name)
print('http response status:', result.status)

os.remove(down_file_name)
bucket.delete_object(key)

+ 94
- 0
test/ossdemo/examples/upload.py View File

@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-

import os
import random
import string
import oss2


# This example demonstrates advanced upload usage such as resumable upload and
# multipart upload. For basic uploads (put/append) see object_basic.py.


# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are read from environment variables, or replace placeholders such as "<你的AccessKeyId>" with real values.
#
# Taking the Hangzhou region as an example, the endpoint may be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for HTTP or HTTPS access respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Make sure the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param


# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)


def random_string(n):
    """Return a random string made of *n* lowercase ASCII letters."""
    letters = [random.choice(string.ascii_lowercase) for _ in range(n)]
    return ''.join(letters)

# Generate a local file for testing; its content is of type bytes.
filename = random_string(32) + '.txt'
content = oss2.to_bytes(random_string(1024 * 1024))

with open(filename, 'wb') as fileobj:
    fileobj.write(content)


"""
Resumable upload
"""

# Resumable upload 1: the file is small (below oss2.defaults.multipart_threshold),
# so oss2.Bucket.put_object is actually used under the hood.
oss2.resumable_upload(bucket, 'remote-normal.txt', filename)

# Resumable upload 2: for demonstration we pass the optional multipart_threshold
# argument to force multipart upload.
oss2.resumable_upload(bucket, 'remote-multipart.txt', filename, multipart_threshold=100 * 1024)


"""
Multipart upload
"""

# The multipart-upload API can also be called directly.
# First use the helper function to pick a part size; here we want 128KB parts.
total_size = os.path.getsize(filename)
part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

# Initialize the multipart upload to get an Upload ID, needed by all subsequent calls.
key = 'remote-multipart2.txt'
upload_id = bucket.init_multipart_upload(key).upload_id

# Upload the parts one by one.
# oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable length
# equals size_to_upload.
with open(filename, 'rb') as fileobj:
    parts = []
    part_number = 1
    offset = 0
    while offset < total_size:
        size_to_upload = min(part_size, total_size - offset)
        result = bucket.upload_part(key, upload_id, part_number,
                                    oss2.SizedFileAdapter(fileobj, size_to_upload))
        parts.append(oss2.models.PartInfo(part_number, result.etag, size = size_to_upload, part_crc = result.crc))

        offset += size_to_upload
        part_number += 1

# Complete the multipart upload.
bucket.complete_multipart_upload(key, upload_id, parts)

# Verify the upload.
with open(filename, 'rb') as fileobj:
    assert bucket.get_object(key).read() == fileobj.read()


os.remove(filename)

+ 48
- 0
test/ossdemo/test.py View File

@@ -0,0 +1,48 @@
import sys
import time

import cv2

import osssdk

# p1 = cv2.imread(r"C:\Users\chenyukun\Pictures\1.png")
# a, b = cv2.imencode(".jpg", p1)

import os

# SECURITY FIX: the original committed a real-looking AccessKey pair directly in
# source. Credentials that have ever been committed must be treated as compromised:
# rotate/disable the leaked key in the console, and read credentials from the
# environment instead of hard-coding them.
access_key = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', 'http://oss-cn-shanghai.aliyuncs.com')
bucket = os.getenv('OSS_TEST_BUCKET', 'th-airprt-media')

### Simple upload
start_1 = time.time()
auth = osssdk.Auth(access_key, access_secret)
bucket = osssdk.Bucket(auth, endpoint, bucket)


# 上传二进制
# bucket.put_object('aa.jpg', b.tobytes())
# 上传文件
# bucket.put_object_from_file('aa.jpg', r"C:\Users\chenyukun\Pictures\1.png")
# 上传时间: 0.5720009803771973

### 断点续传
# 当无法确定待上传的数据长度时,total_bytes的值为None。
def percentage(consumed_bytes, total_bytes):
    """Progress callback: print the completion percentage in place.

    When the total length is unknown (total_bytes is None/0) nothing is printed.
    """
    if not total_bytes:
        return
    rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
    print('\r{0}% '.format(rate), end='')
    sys.stdout.flush()


# If `store` specifies a directory, checkpoint info is saved in that directory.
# If num_threads sets the number of concurrent upload threads, set
# oss2.defaults.connection_pool_size to a value >= that number. The default
# number of concurrent upload threads is 1.
result = osssdk.resumable_upload(bucket, 'aa.jpg', r"C:\Users\chenyukun\Pictures\1.png",
                                 store=osssdk.ResumableStore(root='/tmp'),
                                 # Use multipart upload when the file length is >= multipart_threshold (default 10 MB).
                                 multipart_threshold=100 * 1024,
                                 # Part size in bytes, valid range 100 KB - 5 GB; default 100 KB.
                                 part_size=100 * 1024,
                                 # Upload progress callback.
                                 progress_callback=percentage,
                                 # With num_threads > 1, set oss2.defaults.connection_pool_size >= num_threads. Default is 1.
                                 num_threads=2)
print(result.__dict__.get("resp").__dict__)
print(time.time() - start_1)

+ 4
- 0
test/ossdemo/tests/.gitattributes View File

@@ -0,0 +1,4 @@
invalid_sample_data.csv binary
sample_data.csv binary
sample_json.json binary
sample_json_lines.json binary

+ 0
- 0
test/ossdemo/tests/__init__.py View File


+ 261
- 0
test/ossdemo/tests/common.py View File

@@ -0,0 +1,261 @@
# -*- coding: utf-8 -*-

import os
import random
import string
import unittest
import time
import tempfile
import errno
import logging
from Crypto.PublicKey import RSA
from Crypto.PublicKey.RSA import RsaKey
import oss2

# Verbose logging so failing integration tests show full request traces.
logging.basicConfig(level=logging.DEBUG)

# All test configuration comes from environment variables so that no live
# credentials are committed to the repository.
OSS_ID = os.getenv("OSS_TEST_ACCESS_KEY_ID")
OSS_SECRET = os.getenv("OSS_TEST_ACCESS_KEY_SECRET")
OSS_ENDPOINT = os.getenv("OSS_TEST_ENDPOINT")
OSS_TEST_BUCKET = os.getenv("OSS_TEST_BUCKET")
OSS_CNAME = os.getenv("OSS_TEST_CNAME")
OSS_REGION = os.getenv("OSS_TEST_REGION", "cn-hangzhou")

# KMS settings for server-side encryption tests.
OSS_CMK = os.getenv("OSS_TEST_KMS_CMK_ID")
OSS_CMK_REGION = os.getenv("OSS_TEST_KMS_REGION")

# STS credentials for temporary-token tests.
OSS_STS_ID = os.getenv("OSS_TEST_STS_ID")
OSS_STS_KEY = os.getenv("OSS_TEST_STS_KEY")
OSS_STS_ARN = os.getenv("OSS_TEST_STS_ARN")

# Requester-pays test account.
OSS_PAYER_UID = os.getenv("OSS_TEST_PAYER_UID")
OSS_PAYER_ID = os.getenv("OSS_TEST_PAYER_ACCESS_KEY_ID")
OSS_PAYER_SECRET = os.getenv("OSS_TEST_PAYER_ACCESS_KEY_SECRET")

# RAM role/account for bucket-inventory destination tests.
OSS_INVENTORY_BUCKET_DESTINATION_ARN = os.getenv("OSS_TEST_RAM_ROLE_ARN")
OSS_INVENTORY_BUCKET_DESTINATION_ACCOUNT = os.getenv("OSS_TEST_RAM_UID")

# Signature version is (re)read from the environment in OssTestCase.setUp.
OSS_AUTH_VERSION = None
OSS_TEST_AUTH_SERVER_HOST = os.getenv("OSS_TEST_AUTH_SERVER_HOST")

# Fresh 1024-bit RSA key pair generated once per test run, used by the
# client-side encryption (CryptoBucket / RsaProvider) tests.
private_key = RSA.generate(1024)
public_key = private_key.publickey()
private_key_str = RsaKey.exportKey(private_key)
public_key_str = RsaKey.exportKey(public_key)
key_pair = {'private_key': private_key_str, 'public_key': public_key_str}

# Fixed key pair for compatibility tests against pre-encrypted fixtures.
# NOTE(review): test fixture only — not a live credential.
private_key_compact = '''-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQCokfiAVXXf5ImFzKDw+XO/UByW6mse2QsIgz3ZwBtMNu59fR5z
ttSx+8fB7vR4CN3bTztrP9A6bjoN0FFnhlQ3vNJC5MFO1PByrE/MNd5AAfSVba93
I6sx8NSk5MzUCA4NJzAUqYOEWGtGBcom6kEF6MmR1EKib1Id8hpooY5xaQIDAQAB
AoGAOPUZgkNeEMinrw31U3b2JS5sepG6oDG2CKpPu8OtdZMaAkzEfVTJiVoJpP2Y
nPZiADhFW3e0ZAnak9BPsSsySRaSNmR465cG9tbqpXFKh9Rp/sCPo4Jq2n65yood
JBrnGr6/xhYvNa14sQ6xjjfSgRNBSXD1XXNF4kALwgZyCAECQQDV7t4bTx9FbEs5
36nAxPsPM6aACXaOkv6d9LXI7A0J8Zf42FeBV6RK0q7QG5iNNd1WJHSXIITUizVF
6aX5NnvFAkEAybeXNOwUvYtkgxF4s28s6gn11c5HZw4/a8vZm2tXXK/QfTQrJVXp
VwxmSr0FAajWAlcYN/fGkX1pWA041CKFVQJAG08ozzekeEpAuByTIOaEXgZr5MBQ
gBbHpgZNBl8Lsw9CJSQI15wGfv6yDiLXsH8FyC9TKs+d5Tv4Cvquk0efOQJAd9OC
lCKFs48hdyaiz9yEDsc57PdrvRFepVdj/gpGzD14mVerJbOiOF6aSV19ot27u4on
Td/3aifYs0CveHzFPQJAWb4LCDwqLctfzziG7/S7Z74gyq5qZF4FUElOAZkz718E
yZvADwuz/4aK0od0lX9c4Jp7Mo5vQ4TvdoBnPuGoyw==
-----END RSA PRIVATE KEY-----'''

public_key_compact = '''-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAKiR+IBVdd/kiYXMoPD5c79QHJbqax7ZCwiDPdnAG0w27n19HnO21LH7
x8Hu9HgI3dtPO2s/0DpuOg3QUWeGVDe80kLkwU7U8HKsT8w13kAB9JVtr3cjqzHw
1KTkzNQIDg0nMBSpg4RYa0YFyibqQQXoyZHUQqJvUh3yGmihjnFpAgMBAAE=
-----END RSA PUBLIC KEY-----'''

key_pair_compact = {'private_key': private_key_compact, 'public_key': public_key_compact}


def random_string(n):
    """Return a random string of ``n`` lowercase ASCII letters."""
    return ''.join(random.choices(string.ascii_lowercase, k=n))

# Base name for buckets created by the tests; a random suffix keeps
# concurrent runs from colliding. Falls back to a generated name when no
# bucket is configured in the environment.
OSS_BUCKET_BASE = ('oss-python-sdk-' if OSS_TEST_BUCKET is None else OSS_TEST_BUCKET) + random_string(6)

def random_bytes(n):
    """Return ``n`` random lowercase ASCII letters as bytes."""
    text = random_string(n)
    return oss2.to_bytes(text)

def clean_and_delete_bucket(bucket):
    """Best-effort removal of everything in *bucket*, then delete the bucket.

    Handles versioned buckets (all object versions and delete markers),
    unfinished multipart uploads and live channels before calling
    ``delete_bucket()``.
    """
    # If versioning is enabled/suspended, every version and delete marker
    # must be removed first, page by page.
    try:
        result = bucket.get_bucket_info()
        if result.versioning_status in [oss2.BUCKET_VERSIONING_ENABLE, oss2.BUCKET_VERSIONING_SUSPEND]:
            next_key_marker = None
            next_versionid_marker = None
            is_truncated = True
            while is_truncated:
                objects = bucket.list_object_versions(key_marker=next_key_marker, versionid_marker=next_versionid_marker)
                for obj in objects.versions:
                    bucket.delete_object(obj.key, params={'versionId': obj.versionid})
                for del_marker in objects.delete_marker:
                    bucket.delete_object(del_marker.key, params={'versionId': del_marker.versionid})
                is_truncated = objects.is_truncated
                if is_truncated:
                    next_key_marker = objects.next_key_marker
                    next_versionid_marker = objects.next_versionid_marker
    except Exception:
        # Cleanup is deliberately best-effort (the bucket may not support
        # versioning), but unlike the previous bare ``except:`` this no
        # longer swallows SystemExit/KeyboardInterrupt.
        pass

    # Abort any unfinished multipart uploads.
    up_iter = oss2.MultipartUploadIterator(bucket)
    for up in up_iter:
        bucket.abort_multipart_upload(up.key, up.upload_id)

    # Delete all remaining objects.
    obj_iter = oss2.ObjectIterator(bucket)
    for obj in obj_iter:
        bucket.delete_object(obj.key)

    # Delete all live channels.
    for ch_iter in oss2.LiveChannelIterator(bucket):
        bucket.delete_live_channel(ch_iter.name)

    bucket.delete_bucket()

def clean_and_delete_bucket_by_prefix(bucket_prefix):
    """Empty and delete every bucket whose name starts with *bucket_prefix*."""
    service = oss2.Service(oss2.Auth(OSS_ID, OSS_SECRET), OSS_ENDPOINT)
    for info in service.list_buckets(prefix=bucket_prefix).buckets:
        target = oss2.Bucket(oss2.Auth(OSS_ID, OSS_SECRET), info.extranet_endpoint, info.name)
        clean_and_delete_bucket(target)

def delete_keys(bucket, key_list):
    """Batch-delete *key_list* from *bucket*, at most 100 keys per request."""
    if not key_list:
        return

    batch = 100
    for start in range(0, len(key_list), batch):
        bucket.batch_delete_objects(key_list[start:start + batch])


class NonlocalObject(object):
    """Mutable holder used to emulate ``nonlocal`` writes from closures.

    Callbacks mutate ``obj.var`` instead of rebinding an outer-scope name.
    """

    def __init__(self, value):
        self.var = value

    def __repr__(self):
        # Debug-friendly representation, e.g. NonlocalObject(3).
        return '{0}({1!r})'.format(type(self).__name__, self.var)


def wait_meta_sync():
    """Pause so OSS metadata can propagate; wait longer on Travis CI."""
    time.sleep(5 if os.environ.get('TRAVIS') else 1)


class OssTestCase(unittest.TestCase):
    """Base class for OSS integration tests.

    setUp resets oss2 defaults, randomizes thread counts and creates a
    uniquely-named bucket (plus RSA/KMS CryptoBucket wrappers around it);
    tearDown removes every temp file, object and bucket the test created.
    """

    SINGLE_THREAD_CASE = 'single thread case'

    def __init__(self, *args, **kwargs):
        super(OssTestCase, self).__init__(*args, **kwargs)
        self.bucket = None
        # Per-test-case key prefix so generated object keys don't collide.
        self.prefix = random_string(12)
        self.default_connect_timeout = oss2.defaults.connect_timeout
        # NOTE(review): despite its name, this attribute saves the
        # *multipart_threshold* default (and setUp restores it as such);
        # the attribute is misnamed.
        self.default_multipart_num_threads = oss2.defaults.multipart_threshold

        self.default_multiget_threshold = 1024 * 1024
        self.default_multiget_part_size = 100 * 1024

    def setUp(self):
        """Reset oss2 defaults, randomize concurrency, create the test bucket."""
        oss2.defaults.connect_timeout = self.default_connect_timeout
        oss2.defaults.multipart_threshold = self.default_multipart_num_threads
        # Random thread counts so concurrency issues surface across runs.
        oss2.defaults.multipart_num_threads = random.randint(1, 5)

        oss2.defaults.multiget_threshold = self.default_multiget_threshold
        oss2.defaults.multiget_part_size = self.default_multiget_part_size
        oss2.defaults.multiget_num_threads = random.randint(1, 5)

        # Signature version may change between runs; re-read per test.
        global OSS_AUTH_VERSION
        OSS_AUTH_VERSION = os.getenv('OSS_TEST_AUTH_VERSION')

        self.OSS_BUCKET = OSS_BUCKET_BASE + random_string(4)
        self.bucket = oss2.Bucket(oss2.make_auth(OSS_ID, OSS_SECRET, OSS_AUTH_VERSION), OSS_ENDPOINT, self.OSS_BUCKET)

        try:
            self.bucket.create_bucket()
        except:
            # Best-effort: the bucket may already exist from a prior run.
            pass

        # Client-side-encryption views over the same bucket.
        self.rsa_crypto_bucket = oss2.CryptoBucket(oss2.make_auth(OSS_ID, OSS_SECRET, OSS_AUTH_VERSION), OSS_ENDPOINT,
                                                   self.OSS_BUCKET, crypto_provider=oss2.RsaProvider(key_pair))

        self.kms_crypto_bucket = oss2.CryptoBucket(oss2.make_auth(OSS_ID, OSS_SECRET, OSS_AUTH_VERSION), OSS_ENDPOINT,
                                                   self.OSS_BUCKET, crypto_provider=oss2.AliKMSProvider(OSS_ID, OSS_SECRET,
                                                                                                        OSS_REGION, OSS_CMK))

        # Track created keys/files so tearDown can remove them.
        self.key_list = []
        self.temp_files = []

    def tearDown(self):
        """Remove temp files and every bucket/object the test created."""
        for temp_file in self.temp_files:
            oss2.utils.silently_remove(temp_file)

        clean_and_delete_bucket(self.bucket)
        clean_and_delete_bucket_by_prefix(self.OSS_BUCKET + "-test-")

    def random_key(self, suffix=''):
        """Return a fresh prefixed object key and record it for cleanup."""
        key = self.prefix + random_string(12) + suffix
        self.key_list.append(key)

        return key

    def random_filename(self):
        """Return a fresh random filename and record it for cleanup."""
        filename = random_string(16)
        self.temp_files.append(filename)

        return filename

    def _prepare_temp_file(self, content):
        """Write *content* (bytes) to a new temp file; return its path."""
        fd, pathname = tempfile.mkstemp(suffix='test-upload')

        os.write(fd, content)
        os.close(fd)

        self.temp_files.append(pathname)
        return pathname

    def _prepare_temp_file_with_size(self, size):
        """Create a temp file of *size* bytes (filled with 's'); return its path."""
        fd, pathname = tempfile.mkstemp(suffix='test-upload')

        # Write in 8 MB chunks to avoid holding the whole payload in memory.
        block_size = 8 * 1024 * 1024
        num_written = 0

        while num_written < size:
            to_write = min(block_size, size - num_written)
            num_written += to_write

            content = 's' * to_write
            os.write(fd, oss2.to_bytes(content))

        os.close(fd)

        self.temp_files.append(pathname)
        return pathname

    def retry_assert(self, func):
        """Retry *func* up to 5 times with growing back-off; fail if never truthy."""
        for i in range(5):
            if func():
                return
            else:
                time.sleep(i+2)

        self.assertTrue(False)

    def assertFileContent(self, filename, content):
        """Assert the file's bytes equal *content* exactly."""
        with open(filename, 'rb') as f:
            read = f.read()
            self.assertEqual(len(read), len(content))
            self.assertEqual(read, content)

    def assertFileContentNotEqual(self, filename, content):
        """Assert the file's bytes differ from *content*.

        NOTE(review): this requires both the length AND the content to
        differ; equal-length-but-different data would fail the first check.
        """
        with open(filename, 'rb') as f:
            read = f.read()
            self.assertNotEqual(len(read), len(content))
            self.assertNotEqual(read, content)


BIN
test/ossdemo/tests/deprecated_encrypted_1MB_a_kms View File


+ 8
- 0
test/ossdemo/tests/deprecated_encrypted_1MB_a_meta_kms.json View File

@@ -0,0 +1,8 @@
{
"x-oss-meta-oss-cek-alg": "AES/GCM/NoPadding",
"x-oss-meta-oss-crypto-key": "NWI4MTk0MzUtOTBlMS00NWZlLWE3NzUtNDMwNzhjODNkMGM1YVQ5bXF5eVJTTURneEJSRTNLSkVWUUQ1V3FIR3Q2QzhBQUFBQUFBQUFBRHFCZnFsR2hpems5QWZHQlFGSHFXSW9aZUlYZkNTU0lLZEQwUjFJTXRXM1p6ZzYxOVF4Vm8zR21acXdVZit5VFBsYUhyRmdFMkdsL2JUYjVzPQ==",
"x-oss-meta-oss-crypto-start": "NWI4MTk0MzUtOTBlMS00NWZlLWE3NzUtNDMwNzhjODNkMGM1Z1I5UGpGOGplWVROS1ptOHdqSE1kZFl6V0xvVFJ4RlFBQUFBQUFBQUFBQlFuMWZaOXA3K3VpZU5NN0dqKy9OM1F3PT0=",
"x-oss-meta-oss-wrap-alg": "kms",
"base64-plain-key":"ogO+SR1IiCiBsLxuS5tQVq/OhKQA6uaqoZQfkBcdka4=",
"plain-start":"8"
}

+ 6
- 0
test/ossdemo/tests/deprecated_encrypted_1MB_a_meta_rsa.json View File

@@ -0,0 +1,6 @@
{
"x-oss-meta-oss-cek-alg": "AES/GCM/NoPadding",
"x-oss-meta-oss-crypto-key": "Jmurs0ZAUEhIjMr+MNZnKdQR4llzdpD6sn2ijnSrJG+W7ht2TjIYXT3XWhBIlmWtkrEY1y2l0yDGAEgpRfWtH3km8Z44VEu4G7MLo0kaikoJ/d47+7/a0tSNgd968kkrvKGPpk7sl6ad2seObc+SIcCdvlI+ojXr65Trhk5OYXA=",
"x-oss-meta-oss-crypto-start": "IyhJvxPZodkceJe+ISTug0rUBS/QDOnn67sc3PSOqPikEQq4mDxP8SqvSdYcoytDcloFY/GOgKWeL4kS2ga9FJKXcsVneCrla3q1KTtl45NSNn7np6tOXBkqv3TIU0ZZ58Y1XdSPmr+8TIkjtNlBtg+48fclXoAxqcTq++u2SIc=",
"x-oss-meta-oss-wrap-alg": "rsa"
}

BIN
test/ossdemo/tests/deprecated_encrypted_1MB_a_rsa View File


BIN
test/ossdemo/tests/encrypted_cpp_example.jpg View File


+ 0
- 0
test/ossdemo/tests/encrypted_cpp_example_meta.json View File


Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save