// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.
//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe

#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
  bool _defer_initial_card_mark;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  uint _n_par_threads;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new tlab. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);

  // Allocate an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);

  // Like allocate_init, but the block returned by a successful allocation
  // is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_noinit
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_init
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".
  // But this may not be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."
  //
  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }

  // XXX is_permanent() and is_in_permanent() should be better named
  // to distinguish one from the other.

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is in reserved space
  // for the space not that it is actually allocated (i.e. in committed
  // space).  If you need the more conservative answer use is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

#ifdef ASSERT
  // Returns true if "p" is in the part of the
  // heap being collected.
  virtual bool is_in_partial_collection(const void *p) = 0;
#endif

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_permanent_or_null(const void *p) const {
    return p == NULL || is_permanent(p);
  }

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Number of threads currently working on GC tasks.
  uint n_par_threads() { return _n_par_threads; }

  // May be overridden to set additional parallelism.
  virtual void set_par_threads(uint t) { _n_par_threads = t; };

  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // Allocate and initialize instances of Class
  static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely. These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does).  This allows for a delay in the installation of the klass
  // pointer that is needed during the create of klassKlass's.  The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                       oop obj,
                                                       int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported. mem_allocate() should never be
  // called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }

  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity.  In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere.  The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address doesn't need a store
  // barrier. Returns "true" if it doesn't need an initializing
  // store barrier; answers "false" if it does.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram?)
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each. This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory. If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // object contained in permanent memory. If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

  // NOTE!  There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)
  // Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
  void pre_full_gc_dump();
  void post_full_gc_dump();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;

  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }

  // Print more detailed heap information on the given
  // outputStream. The default behaviour is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // If PrintHeapAtGC is set call the appropriate routines
  void print_heap_before_gc() {
    if (PrintHeapAtGC) {
      Universe::print_heap_before_gc();
    }
    if (_gc_heap_log != NULL) {
      _gc_heap_log->log_heap_before();
    }
  }
  void print_heap_after_gc() {
    if (PrintHeapAtGC) {
      Universe::print_heap_after_gc();
    }
    if (_gc_heap_log != NULL) {
      _gc_heap_log->log_heap_after();
    }
  }

  // Allocate GCHeapLog during VM startup
  static void initialize_heap_log();

  // Heap verification
  virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // This is a convenience method that is used in cases where
  // the actual number of GC worker threads is not pertinent but
  // only whether there is more than 0. Use of this method helps
  // reduce the occurrence of ParallelGCThreads to uses where the
  // actual number may be germane.
  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }

  /////////////// Unit tests ///////////////

  NOT_PRODUCT(static void test_is_in();)
};
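
The fast-path/slow-path split behind allocate_from_tlab() and allocate_from_tlab_slow() is the heart of TLAB allocation: the fast path is a lock-free pointer bump inside a buffer private to one thread, and only when the buffer is exhausted does the thread fall back to a slow path that asks the shared heap for a new buffer (via allocate_new_tlab() in a real heap). Below is a minimal standalone sketch of that pattern; it is not HotSpot's implementation, and the Tlab struct, the HeapWord typedef, and the malloc-backed refill are all illustrative stand-ins.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for HeapWord*: allocation proceeds in word units.
typedef uintptr_t HeapWord;

// A minimal thread-local allocation buffer: [top, end) is the unused part.
struct Tlab {
  HeapWord* top;
  HeapWord* end;
};

// Fast path: bump the top pointer. No locking is needed because the TLAB
// belongs to a single thread. Returns NULL when the buffer is too small.
static HeapWord* allocate_from_tlab(Tlab* tlab, size_t words) {
  if (tlab->top != NULL && tlab->top + words <= tlab->end) {
    HeapWord* obj = tlab->top;
    tlab->top += words;
    return obj;
  }
  return NULL;  // caller falls back to the slow path
}

// Slow path (sketch): retire the current buffer and obtain a fresh one from
// the shared heap; here the "shared heap" is simply malloc.
static HeapWord* allocate_from_tlab_slow(Tlab* tlab, size_t words,
                                         size_t new_tlab_words) {
  HeapWord* buf = (HeapWord*)malloc(new_tlab_words * sizeof(HeapWord));
  if (buf == NULL) return NULL;
  tlab->top = buf;
  tlab->end = buf + new_tlab_words;
  return allocate_from_tlab(tlab, words);
}

int main() {
  Tlab tlab = { NULL, NULL };
  HeapWord* obj = allocate_from_tlab(&tlab, 4);
  if (obj == NULL) obj = allocate_from_tlab_slow(&tlab, 4, 1024);
  printf("allocated 4 words at %p\n", (void*)obj);
  return 0;
}

A real slow path additionally decides whether to retire or keep the old buffer and fills its unused tail with a filler object, which is where the filler utilities below come in.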
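The filler utilities (min_fill_size(), fill_with_objects(), fill_with_object()) and the block interface (block_start(), block_size(), block_is_obj()) cooperate: stamping dummy objects over dead gaps is what keeps the heap parsable, so a walker can step from block to block using sizes alone, exactly as ensure_parsability() requires. The following standalone sketch uses deliberately simplified assumptions: a one-word header encoding the block size and an is-object bit, which does not match HotSpot's real object layout.

#include <cstddef>
#include <cstdio>

// A toy parsable region: each block starts with a one-word header holding
// its total size in words (upper bits) and an "is object" flag (low bit).
static const size_t REGION_WORDS = 32;
static size_t region[REGION_WORDS];

static size_t block_size_at(size_t off)   { return region[off] >> 1; }
static bool   block_is_obj_at(size_t off) { return (region[off] & 1) != 0; }

// fill_with_object() analogue: stamp a filler header over an unused gap so
// a walker can step over it like any other block.
static void fill_with_object_at(size_t off, size_t words) {
  region[off] = (words << 1);       // filler: is_obj bit clear
}

static void allocate_obj_at(size_t off, size_t words) {
  region[off] = (words << 1) | 1;   // object: is_obj bit set
}

int main() {
  allocate_obj_at(0, 5);
  fill_with_object_at(5, 3);        // dead gap, e.g. a retired TLAB tail
  allocate_obj_at(8, 24);

  // Heap walk in the style of block_size()/block_is_obj().
  for (size_t off = 0; off < REGION_WORDS; off += block_size_at(off)) {
    printf("block at %2zu: %2zu words, %s\n", off, block_size_at(off),
           block_is_obj_at(off) ? "object" : "filler");
  }
  return 0;
}

HotSpot's real fillers are int arrays or java.lang.Object instances (see fill_with_array() and fill_with_object_impl()), so the walker needs no special case: a filler parses like any other object.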
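The deferred card-mark machinery (_defer_initial_card_mark, new_store_pre_barrier(), flush_deferred_store_barrier(), card_mark_must_follow_store()) lets a compiler elide per-store barriers while initializing a new object, provided the card marks are issued strictly after the initializing stores. Here is a toy model of that sequencing; the card geometry (one byte per 512-byte card) and all names are illustrative, not HotSpot's card-table implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// A toy card table: one byte per 512-byte "card" of heap.
static const size_t HEAP_BYTES = 1 << 16;
static const size_t CARD_SHIFT = 9;
static uint8_t heap_mem[HEAP_BYTES];
static uint8_t cards[HEAP_BYTES >> CARD_SHIFT];

// Ordinary post-store barrier: dirty the card covering 'addr'.
static void post_store_barrier(void* addr) {
  size_t offset = (uint8_t*)addr - heap_mem;
  cards[offset >> CARD_SHIFT] = 1;  // dirty
}

// Deferred variant, in the spirit of _defer_initial_card_mark: remember the
// newly allocated object and mark its cards later, after its initializing
// stores complete (e.g. at the next slow-path allocation or safepoint).
static void*  deferred_obj  = NULL;
static size_t deferred_size = 0;

static void defer_card_mark(void* obj, size_t size) {
  deferred_obj  = obj;
  deferred_size = size;
}

static void flush_deferred_store_barrier() {
  if (deferred_obj != NULL) {
    for (size_t i = 0; i < deferred_size; i += (1 << CARD_SHIFT))
      post_store_barrier((uint8_t*)deferred_obj + i);
    post_store_barrier((uint8_t*)deferred_obj + deferred_size - 1);
    deferred_obj = NULL;
  }
}

int main() {
  void* obj = &heap_mem[4096];
  defer_card_mark(obj, 1024);      // compiler elided the per-store barriers
  // ... initializing stores to obj happen here, with no card marks ...
  flush_deferred_store_barrier();  // card marks strictly follow the stores
  printf("card 8 dirty: %d\n", cards[8]);
  return 0;
}

This ordering is exactly what card_mark_must_follow_store() advertises: a heap that scans dirty cards concurrently must not see a card go dirty before the object behind it is fully initialized.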